Add virtio gpu driver.

Message ID 1427213239-8775-1-git-send-email-kraxel@redhat.com (mailing list archive)
State New, archived

Commit Message

Gerd Hoffmann March 24, 2015, 4:07 p.m. UTC
From: Dave Airlie <airlied@gmail.com>

This patch adds a kms driver for the virtio gpu.  The xorg modesetting
driver can handle the device just fine, the framebuffer for fbcon is
there too.

QEMU patches for the host side are currently under review.

The pci version of the device comes in two variants: with and without
vga compatibility.  The former has an extra memory bar for the vga
framebuffer; the latter is a pure virtio device.  The only concern for
this driver is that in the virtio-vga case we have to kick out the
firmware framebuffer.
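
For illustration, telling the two variants apart boils down to a single
PCI class check (see virtgpu_drm_bus.c below):

    bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;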

Initial revision has only 2d support, 3d (virgl) support requires
some more work on the qemu side and will be added later.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
 drivers/gpu/drm/Kconfig                  |   2 +
 drivers/gpu/drm/Makefile                 |   1 +
 drivers/gpu/drm/virtio/Kconfig           |  11 +
 drivers/gpu/drm/virtio/Makefile          |   9 +
 drivers/gpu/drm/virtio/virtgpu_debugfs.c |  64 ++++
 drivers/gpu/drm/virtio/virtgpu_display.c | 527 ++++++++++++++++++++++++++++++
 drivers/gpu/drm/virtio/virtgpu_drm_bus.c |  68 ++++
 drivers/gpu/drm/virtio/virtgpu_drv.c     | 132 ++++++++
 drivers/gpu/drm/virtio/virtgpu_drv.h     | 326 +++++++++++++++++++
 drivers/gpu/drm/virtio/virtgpu_fb.c      | 415 ++++++++++++++++++++++++
 drivers/gpu/drm/virtio/virtgpu_fence.c   |  95 ++++++
 drivers/gpu/drm/virtio/virtgpu_gem.c     | 120 +++++++
 drivers/gpu/drm/virtio/virtgpu_kms.c     | 125 +++++++
 drivers/gpu/drm/virtio/virtgpu_object.c  | 174 ++++++++++
 drivers/gpu/drm/virtio/virtgpu_ttm.c     | 451 ++++++++++++++++++++++++++
 drivers/gpu/drm/virtio/virtgpu_vq.c      | 540 +++++++++++++++++++++++++++++++
 drivers/virtio/virtio_pci_common.c       |   2 +-
 include/drm/drmP.h                       |   1 +
 include/uapi/linux/Kbuild                |   1 +
 include/uapi/linux/virtio_gpu.h          | 203 ++++++++++++
 include/uapi/linux/virtio_ids.h          |   2 +-
 21 files changed, 3267 insertions(+), 2 deletions(-)
 create mode 100644 drivers/gpu/drm/virtio/Kconfig
 create mode 100644 drivers/gpu/drm/virtio/Makefile
 create mode 100644 drivers/gpu/drm/virtio/virtgpu_debugfs.c
 create mode 100644 drivers/gpu/drm/virtio/virtgpu_display.c
 create mode 100644 drivers/gpu/drm/virtio/virtgpu_drm_bus.c
 create mode 100644 drivers/gpu/drm/virtio/virtgpu_drv.c
 create mode 100644 drivers/gpu/drm/virtio/virtgpu_drv.h
 create mode 100644 drivers/gpu/drm/virtio/virtgpu_fb.c
 create mode 100644 drivers/gpu/drm/virtio/virtgpu_fence.c
 create mode 100644 drivers/gpu/drm/virtio/virtgpu_gem.c
 create mode 100644 drivers/gpu/drm/virtio/virtgpu_kms.c
 create mode 100644 drivers/gpu/drm/virtio/virtgpu_object.c
 create mode 100644 drivers/gpu/drm/virtio/virtgpu_ttm.c
 create mode 100644 drivers/gpu/drm/virtio/virtgpu_vq.c
 create mode 100644 include/uapi/linux/virtio_gpu.h

Comments

Michael S. Tsirkin March 24, 2015, 4:15 p.m. UTC | #1
On Tue, Mar 24, 2015 at 05:07:18PM +0100, Gerd Hoffmann wrote:
> From: Dave Airlie <airlied@gmail.com>
> 
> This patch adds a kms driver for the virtio gpu.  The xorg modesetting
> driver can handle the device just fine, the framebuffer for fbcon is
> there too.
> 
> QEMU patches for the host side are currently under review.
> 
> The pci version of the device comes in two variants: with and without
> vga compatibility.  The former has an extra memory bar for the vga
> framebuffer; the latter is a pure virtio device.  The only concern for
> this driver is that in the virtio-vga case we have to kick out the
> firmware framebuffer.
> 
> Initial revision has only 2d support, 3d (virgl) support requires
> some more work on the qemu side and will be added later.
> 
> Signed-off-by: Dave Airlie <airlied@redhat.com>
> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>

...

> diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
> index e894eb2..a3167fa 100644
> --- a/drivers/virtio/virtio_pci_common.c
> +++ b/drivers/virtio/virtio_pci_common.c
> @@ -510,7 +510,7 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
>  		goto err_enable_device;
>  
>  	rc = pci_request_regions(pci_dev, "virtio-pci");
> -	if (rc)
> +	if (rc && ((pci_dev->class >> 8) != PCI_CLASS_DISPLAY_VGA))
>  		goto err_request_regions;
>  
>  	if (force_legacy) {

This is probably what you described as "the only concern"?  Can you
explain why you are doing this?  If we only need to request specific
regions, I think we should do exactly that, requesting only the parts
of the regions that are covered by the virtio capabilities.

Seems cleaner than looking for a specific class.
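
Something along those lines, at least per-BAR (untested sketch, and the
helper name is made up; a finer-grained version could request only the
sub-ranges named by each capability):

static int virtio_pci_request_cap_regions(struct pci_dev *pci_dev)
{
	int pos, rc, bars = 0;
	u8 bar;

	/* walk the vendor capability list; each virtio capability
	 * records which BAR it lives in */
	for (pos = pci_find_capability(pci_dev, PCI_CAP_ID_VNDR);
	     pos > 0;
	     pos = pci_find_next_capability(pci_dev, pos, PCI_CAP_ID_VNDR)) {
		pci_read_config_byte(pci_dev,
				     pos + offsetof(struct virtio_pci_cap, bar),
				     &bar);
		if (bar > PCI_STD_RESOURCE_END || (bars & (1 << bar)))
			continue;
		rc = pci_request_region(pci_dev, bar, "virtio-pci");
		if (rc)
			return rc;
		bars |= 1 << bar;
	}
	return 0;
}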

Didn't look at device code in depth yet.
Daniel Vetter March 24, 2015, 4:50 p.m. UTC | #2
On Tue, Mar 24, 2015 at 05:07:18PM +0100, Gerd Hoffmann wrote:
> From: Dave Airlie <airlied@gmail.com>
> 
> This patch adds a kms driver for the virtio gpu.  The xorg modesetting
> driver can handle the device just fine, the framebuffer for fbcon is
> there too.
> 
> QEMU patches for the host side are currently under review.
> 
> The pci version of the device comes in two variants: with and without
> vga compatibility.  The former has an extra memory bar for the vga
> framebuffer; the latter is a pure virtio device.  The only concern for
> this driver is that in the virtio-vga case we have to kick out the
> firmware framebuffer.
> 
> Initial revision has only 2d support, 3d (virgl) support requires
> some more work on the qemu side and will be added later.
> 
> Signed-off-by: Dave Airlie <airlied@redhat.com>
> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>

Standard request from my side for new drm drivers (especially if they're
this simple): Can you please update the driver to the latest drm internal
interfaces, i.e. universal planes and atomic?

Thanks, Daniel

> ---
>  drivers/gpu/drm/Kconfig                  |   2 +
>  drivers/gpu/drm/Makefile                 |   1 +
>  drivers/gpu/drm/virtio/Kconfig           |  11 +
>  drivers/gpu/drm/virtio/Makefile          |   9 +
>  drivers/gpu/drm/virtio/virtgpu_debugfs.c |  64 ++++
>  drivers/gpu/drm/virtio/virtgpu_display.c | 527 ++++++++++++++++++++++++++++++
>  drivers/gpu/drm/virtio/virtgpu_drm_bus.c |  68 ++++
>  drivers/gpu/drm/virtio/virtgpu_drv.c     | 132 ++++++++
>  drivers/gpu/drm/virtio/virtgpu_drv.h     | 326 +++++++++++++++++++
>  drivers/gpu/drm/virtio/virtgpu_fb.c      | 415 ++++++++++++++++++++++++
>  drivers/gpu/drm/virtio/virtgpu_fence.c   |  95 ++++++
>  drivers/gpu/drm/virtio/virtgpu_gem.c     | 120 +++++++
>  drivers/gpu/drm/virtio/virtgpu_kms.c     | 125 +++++++
>  drivers/gpu/drm/virtio/virtgpu_object.c  | 174 ++++++++++
>  drivers/gpu/drm/virtio/virtgpu_ttm.c     | 451 ++++++++++++++++++++++++++
>  drivers/gpu/drm/virtio/virtgpu_vq.c      | 540 +++++++++++++++++++++++++++++++
>  drivers/virtio/virtio_pci_common.c       |   2 +-
>  include/drm/drmP.h                       |   1 +
>  include/uapi/linux/Kbuild                |   1 +
>  include/uapi/linux/virtio_gpu.h          | 203 ++++++++++++
>  include/uapi/linux/virtio_ids.h          |   2 +-
>  21 files changed, 3267 insertions(+), 2 deletions(-)
>  create mode 100644 drivers/gpu/drm/virtio/Kconfig
>  create mode 100644 drivers/gpu/drm/virtio/Makefile
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_debugfs.c
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_display.c
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_drm_bus.c
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_drv.c
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_drv.h
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_fb.c
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_fence.c
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_gem.c
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_kms.c
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_object.c
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_ttm.c
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_vq.c
>  create mode 100644 include/uapi/linux/virtio_gpu.h
> 
> diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
> index 151a050..f2388ea 100644
> --- a/drivers/gpu/drm/Kconfig
> +++ b/drivers/gpu/drm/Kconfig
> @@ -197,6 +197,8 @@ source "drivers/gpu/drm/qxl/Kconfig"
>  
>  source "drivers/gpu/drm/bochs/Kconfig"
>  
> +source "drivers/gpu/drm/virtio/Kconfig"
> +
>  source "drivers/gpu/drm/msm/Kconfig"
>  
>  source "drivers/gpu/drm/tegra/Kconfig"
> diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
> index 2c239b9..083d443 100644
> --- a/drivers/gpu/drm/Makefile
> +++ b/drivers/gpu/drm/Makefile
> @@ -62,6 +62,7 @@ obj-$(CONFIG_DRM_OMAP)	+= omapdrm/
>  obj-$(CONFIG_DRM_TILCDC)	+= tilcdc/
>  obj-$(CONFIG_DRM_QXL) += qxl/
>  obj-$(CONFIG_DRM_BOCHS) += bochs/
> +obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio/
>  obj-$(CONFIG_DRM_MSM) += msm/
>  obj-$(CONFIG_DRM_TEGRA) += tegra/
>  obj-$(CONFIG_DRM_STI) += sti/
> diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
> new file mode 100644
> index 0000000..55868e2
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/Kconfig
> @@ -0,0 +1,11 @@
> +config DRM_VIRTIO_GPU
> +	tristate "QEMU Virtio GPU"
> +	depends on DRM && VIRTIO
> +	select FB_SYS_FILLRECT
> +	select FB_SYS_COPYAREA
> +	select FB_SYS_IMAGEBLIT
> +	select DRM_KMS_HELPER
> +	select DRM_KMS_FB_HELPER
> +	select DRM_TTM
> +	help
> +	   QEMU based virtio GPU.
> diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile
> new file mode 100644
> index 0000000..57d59ee
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/Makefile
> @@ -0,0 +1,9 @@
> +#
> +# Makefile for the drm device driver.  This driver provides support for the
> +# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
> +
> +ccflags-y := -Iinclude/drm
> +
> +virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_drm_bus.o virtgpu_gem.o virtgpu_fb.o virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o
> +
> +obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio-gpu.o
> diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
> new file mode 100644
> index 0000000..dbc497d
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
> @@ -0,0 +1,64 @@
> +/*
> + * Copyright (C) 2009 Red Hat
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining
> + * a copy of this software and associated documentation files (the
> + * "Software"), to deal in the Software without restriction, including
> + * without limitation the rights to use, copy, modify, merge, publish,
> + * distribute, sublicense, and/or sell copies of the Software, and to
> + * permit persons to whom the Software is furnished to do so, subject to
> + * the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the
> + * next paragraph) shall be included in all copies or substantial
> + * portions of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
> + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
> + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
> + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
> + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
> + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
> + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
> + *
> + */
> +
> +#include <linux/debugfs.h>
> +
> +#include "drmP.h"
> +#include "virtgpu_drv.h"
> +
> +static int
> +virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
> +{
> +	struct drm_info_node *node = (struct drm_info_node *) m->private;
> +	struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
> +
> +	seq_printf(m, "fence %llu %llu\n",
> +		   (u64)atomic64_read(&vgdev->fence_drv.last_seq),
> +		   vgdev->fence_drv.sync_seq);
> +	return 0;
> +}
> +
> +static struct drm_info_list virtio_gpu_debugfs_list[] = {
> +	{ "irq_fence", virtio_gpu_debugfs_irq_info, 0, NULL },
> +};
> +
> +#define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list)
> +
> +int
> +virtio_gpu_debugfs_init(struct drm_minor *minor)
> +{
> +	drm_debugfs_create_files(virtio_gpu_debugfs_list,
> +				 VIRTIO_GPU_DEBUGFS_ENTRIES,
> +				 minor->debugfs_root, minor);
> +	return 0;
> +}
> +
> +void
> +virtio_gpu_debugfs_takedown(struct drm_minor *minor)
> +{
> +	drm_debugfs_remove_files(virtio_gpu_debugfs_list,
> +				 VIRTIO_GPU_DEBUGFS_ENTRIES,
> +				 minor);
> +}
> diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
> new file mode 100644
> index 0000000..578a02c
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_display.c
> @@ -0,0 +1,527 @@
> +/*
> + * Copyright 2013 Red Hat Inc.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice shall be included in
> + * all copies or substantial portions of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> + * OTHER DEALINGS IN THE SOFTWARE.
> + *
> + * Authors: Dave Airlie
> + *          Alon Levy
> + */
> +
> +#include "virtgpu_drv.h"
> +#include <drm/drm_crtc_helper.h>
> +#include <drm/drm_plane_helper.h>
> +
> +#define XRES_MIN   320
> +#define YRES_MIN   200
> +
> +#define XRES_DEF  1024
> +#define YRES_DEF   768
> +
> +#define XRES_MAX  8192
> +#define YRES_MAX  8192
> +
> +static void virtio_gpu_crtc_gamma_set(struct drm_crtc *crtc,
> +				      u16 *red, u16 *green, u16 *blue,
> +				      uint32_t start, uint32_t size)
> +{
> +	/* TODO */
> +}
> +
> +static void
> +virtio_gpu_hide_cursor(struct virtio_gpu_device *vgdev,
> +		       struct virtio_gpu_output *output)
> +{
> +	output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
> +	output->cursor.resource_id = 0;
> +	virtio_gpu_cursor_ping(vgdev, output);
> +}
> +
> +static int virtio_gpu_crtc_cursor_set(struct drm_crtc *crtc,
> +				      struct drm_file *file_priv,
> +				      uint32_t handle,
> +				      uint32_t width,
> +				      uint32_t height,
> +				      int32_t hot_x, int32_t hot_y)
> +{
> +	struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
> +	struct virtio_gpu_output *output =
> +		container_of(crtc, struct virtio_gpu_output, crtc);
> +	struct drm_gem_object *gobj = NULL;
> +	struct virtio_gpu_object *qobj = NULL;
> +	struct virtio_gpu_fence *fence = NULL;
> +	int ret = 0;
> +
> +	if (handle == 0) {
> +		virtio_gpu_hide_cursor(vgdev, output);
> +		return 0;
> +	}
> +
> +	/* lookup the cursor */
> +	gobj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
> +	if (gobj == NULL)
> +		return -ENOENT;
> +
> +	qobj = gem_to_virtio_gpu_obj(gobj);
> +
> +	if (!qobj->hw_res_handle) {
> +		ret = -EINVAL;
> +		goto out;
> +	}
> +
> +	ret = virtio_gpu_cmd_transfer_to_host_2d(vgdev, qobj->hw_res_handle, 0,
> +						 cpu_to_le32(64),
> +						 cpu_to_le32(64),
> +						 0, 0, &fence);
> +	if (!ret) {
> +		reservation_object_add_excl_fence(qobj->tbo.resv,
> +						  &fence->f);
> +		virtio_gpu_object_wait(qobj, false);
> +	}
> +
> +	output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
> +	output->cursor.resource_id = cpu_to_le32(qobj->hw_res_handle);
> +	output->cursor.hot_x = cpu_to_le32(hot_x);
> +	output->cursor.hot_y = cpu_to_le32(hot_y);
> +	virtio_gpu_cursor_ping(vgdev, output);
> +out:
> +	drm_gem_object_unreference_unlocked(gobj);
> +	return ret;
> +}
> +
> +static int virtio_gpu_crtc_cursor_move(struct drm_crtc *crtc,
> +				    int x, int y)
> +{
> +	struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
> +	struct virtio_gpu_output *output =
> +		container_of(crtc, struct virtio_gpu_output, crtc);
> +
> +	output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
> +	output->cursor.pos.x = cpu_to_le32(x);
> +	output->cursor.pos.y = cpu_to_le32(y);
> +	virtio_gpu_cursor_ping(vgdev, output);
> +	return 0;
> +}
> +
> +static int virtio_gpu_crtc_page_flip(struct drm_crtc *crtc,
> +				     struct drm_framebuffer *fb,
> +				     struct drm_pending_vblank_event *event,
> +				     uint32_t flags)
> +{
> +	return -EINVAL;
> +}
> +
> +
> +static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
> +	.cursor_set2 = virtio_gpu_crtc_cursor_set,
> +	.cursor_move = virtio_gpu_crtc_cursor_move,
> +	.gamma_set = virtio_gpu_crtc_gamma_set,
> +	.set_config = drm_crtc_helper_set_config,
> +	.page_flip = virtio_gpu_crtc_page_flip,
> +	.destroy = drm_crtc_cleanup,
> +};
> +
> +static void virtio_gpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
> +{
> +	struct virtio_gpu_framebuffer *virtio_gpu_fb
> +		= to_virtio_gpu_framebuffer(fb);
> +
> +	if (virtio_gpu_fb->obj)
> +		drm_gem_object_unreference_unlocked(virtio_gpu_fb->obj);
> +	drm_framebuffer_cleanup(fb);
> +	kfree(virtio_gpu_fb);
> +}
> +
> +static int
> +virtio_gpu_framebuffer_surface_dirty(struct drm_framebuffer *fb,
> +				     struct drm_file *file_priv,
> +				     unsigned flags, unsigned color,
> +				     struct drm_clip_rect *clips,
> +				     unsigned num_clips)
> +{
> +	struct virtio_gpu_framebuffer *virtio_gpu_fb
> +		= to_virtio_gpu_framebuffer(fb);
> +
> +	return virtio_gpu_surface_dirty(virtio_gpu_fb, clips, num_clips);
> +}
> +
> +static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
> +	.destroy = virtio_gpu_user_framebuffer_destroy,
> +	.dirty = virtio_gpu_framebuffer_surface_dirty,
> +};
> +
> +int
> +virtio_gpu_framebuffer_init(struct drm_device *dev,
> +			    struct virtio_gpu_framebuffer *vgfb,
> +			    struct drm_mode_fb_cmd2 *mode_cmd,
> +			    struct drm_gem_object *obj)
> +{
> +	int ret;
> +	struct virtio_gpu_object *bo;
> +	vgfb->obj = obj;
> +
> +	bo = gem_to_virtio_gpu_obj(obj);
> +
> +	ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs);
> +	if (ret) {
> +		vgfb->obj = NULL;
> +		return ret;
> +	}
> +	drm_helper_mode_fill_fb_struct(&vgfb->base, mode_cmd);
> +
> +	spin_lock_init(&vgfb->dirty_lock);
> +	vgfb->x1 = vgfb->y1 = INT_MAX;
> +	vgfb->x2 = vgfb->y2 = 0;
> +	return 0;
> +}
> +
> +static void virtio_gpu_crtc_dpms(struct drm_crtc *crtc, int mode)
> +{
> +}
> +
> +static bool virtio_gpu_crtc_mode_fixup(struct drm_crtc *crtc,
> +				       const struct drm_display_mode *mode,
> +				       struct drm_display_mode *adjusted_mode)
> +{
> +	return true;
> +}
> +
> +static int virtio_gpu_crtc_mode_set(struct drm_crtc *crtc,
> +				    struct drm_display_mode *mode,
> +				    struct drm_display_mode *adjusted_mode,
> +				    int x, int y,
> +				    struct drm_framebuffer *old_fb)
> +{
> +	struct drm_device *dev = crtc->dev;
> +	struct virtio_gpu_device *vgdev = dev->dev_private;
> +	struct virtio_gpu_framebuffer *vgfb;
> +	struct virtio_gpu_object *bo, *old_bo = NULL;
> +	struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
> +
> +	if (!crtc->primary->fb) {
> +		DRM_DEBUG_KMS("No FB bound\n");
> +		return 0;
> +	}
> +
> +	if (old_fb) {
> +		vgfb = to_virtio_gpu_framebuffer(old_fb);
> +		old_bo = gem_to_virtio_gpu_obj(vgfb->obj);
> +	}
> +	vgfb = to_virtio_gpu_framebuffer(crtc->primary->fb);
> +	bo = gem_to_virtio_gpu_obj(vgfb->obj);
> +	DRM_DEBUG("+%d+%d (%d,%d) => (%d,%d)\n",
> +		  x, y,
> +		  mode->hdisplay, mode->vdisplay,
> +		  adjusted_mode->hdisplay,
> +		  adjusted_mode->vdisplay);
> +
> +	virtio_gpu_cmd_set_scanout(vgdev, output->index, bo->hw_res_handle,
> +				mode->hdisplay, mode->vdisplay, x, y);
> +
> +	return 0;
> +}
> +
> +static void virtio_gpu_crtc_prepare(struct drm_crtc *crtc)
> +{
> +	DRM_DEBUG("current: %dx%d+%d+%d (%d).\n",
> +		  crtc->mode.hdisplay, crtc->mode.vdisplay,
> +		  crtc->x, crtc->y, crtc->enabled);
> +}
> +
> +static void virtio_gpu_crtc_commit(struct drm_crtc *crtc)
> +{
> +	DRM_DEBUG("\n");
> +}
> +
> +static void virtio_gpu_crtc_load_lut(struct drm_crtc *crtc)
> +{
> +}
> +
> +static void virtio_gpu_crtc_disable(struct drm_crtc *crtc)
> +{
> +	struct drm_device *dev = crtc->dev;
> +	struct virtio_gpu_device *vgdev = dev->dev_private;
> +	struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
> +
> +	virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
> +}
> +
> +static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
> +	.disable = virtio_gpu_crtc_disable,
> +	.dpms = virtio_gpu_crtc_dpms,
> +	.mode_fixup = virtio_gpu_crtc_mode_fixup,
> +	.mode_set = virtio_gpu_crtc_mode_set,
> +	.prepare = virtio_gpu_crtc_prepare,
> +	.commit = virtio_gpu_crtc_commit,
> +	.load_lut = virtio_gpu_crtc_load_lut,
> +};
> +
> +static void virtio_gpu_enc_dpms(struct drm_encoder *encoder, int mode)
> +{
> +}
> +
> +static bool virtio_gpu_enc_mode_fixup(struct drm_encoder *encoder,
> +				      const struct drm_display_mode *mode,
> +				      struct drm_display_mode *adjusted_mode)
> +{
> +	return true;
> +}
> +
> +static void virtio_gpu_enc_prepare(struct drm_encoder *encoder)
> +{
> +}
> +
> +static void virtio_gpu_enc_commit(struct drm_encoder *encoder)
> +{
> +}
> +
> +static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder,
> +				    struct drm_display_mode *mode,
> +				    struct drm_display_mode *adjusted_mode)
> +{
> +}
> +
> +static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
> +{
> +	struct virtio_gpu_output *output =
> +		drm_connector_to_virtio_gpu_output(connector);
> +	struct drm_display_mode *mode = NULL;
> +	int count, width, height;
> +
> +	width  = le32_to_cpu(output->info.r.width);
> +	height = le32_to_cpu(output->info.r.height);
> +	count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
> +
> +	if (width == 0 || height == 0) {
> +		width = XRES_DEF;
> +		height = YRES_DEF;
> +		drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
> +	} else {
> +		DRM_DEBUG("add mode: %dx%d\n", width, height);
> +		mode = drm_cvt_mode(connector->dev, width, height, 60,
> +				    false, false, false);
> +		mode->type |= DRM_MODE_TYPE_PREFERRED;
> +		drm_mode_probed_add(connector, mode);
> +		count++;
> +	}
> +
> +	return count;
> +}
> +
> +static int virtio_gpu_conn_mode_valid(struct drm_connector *connector,
> +				      struct drm_display_mode *mode)
> +{
> +	struct virtio_gpu_output *output =
> +		drm_connector_to_virtio_gpu_output(connector);
> +	int width, height;
> +
> +	width  = le32_to_cpu(output->info.r.width);
> +	height = le32_to_cpu(output->info.r.height);
> +
> +	if (!(mode->type & DRM_MODE_TYPE_PREFERRED))
> +		return MODE_OK;
> +	if (mode->hdisplay == XRES_DEF && mode->vdisplay == YRES_DEF)
> +		return MODE_OK;
> +	if (mode->hdisplay <= width  && mode->hdisplay >= width - 16 &&
> +	    mode->vdisplay <= height && mode->vdisplay >= height - 16)
> +		return MODE_OK;
> +
> +	DRM_DEBUG("del mode: %dx%d\n", mode->hdisplay, mode->vdisplay);
> +	return MODE_BAD;
> +}
> +
> +static struct drm_encoder*
> +virtio_gpu_best_encoder(struct drm_connector *connector)
> +{
> +	struct virtio_gpu_output *virtio_gpu_output =
> +		drm_connector_to_virtio_gpu_output(connector);
> +
> +	return &virtio_gpu_output->enc;
> +}
> +
> +
> +static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = {
> +	.dpms = virtio_gpu_enc_dpms,
> +	.mode_fixup = virtio_gpu_enc_mode_fixup,
> +	.prepare = virtio_gpu_enc_prepare,
> +	.mode_set = virtio_gpu_enc_mode_set,
> +	.commit = virtio_gpu_enc_commit,
> +};
> +
> +static const struct drm_connector_helper_funcs virtio_gpu_conn_helper_funcs = {
> +	.get_modes = virtio_gpu_conn_get_modes,
> +	.mode_valid = virtio_gpu_conn_mode_valid,
> +	.best_encoder = virtio_gpu_best_encoder,
> +};
> +
> +static void virtio_gpu_conn_save(struct drm_connector *connector)
> +{
> +	DRM_DEBUG("\n");
> +}
> +
> +static void virtio_gpu_conn_restore(struct drm_connector *connector)
> +{
> +	DRM_DEBUG("\n");
> +}
> +
> +static enum drm_connector_status virtio_gpu_conn_detect(
> +			struct drm_connector *connector,
> +			bool force)
> +{
> +	struct virtio_gpu_output *output =
> +		drm_connector_to_virtio_gpu_output(connector);
> +
> +	if (output->info.enabled)
> +		return connector_status_connected;
> +	else
> +		return connector_status_disconnected;
> +}
> +
> +static int virtio_gpu_conn_set_property(struct drm_connector *connector,
> +				   struct drm_property *property,
> +				   uint64_t value)
> +{
> +	DRM_DEBUG("\n");
> +	return 0;
> +}
> +
> +static void virtio_gpu_conn_destroy(struct drm_connector *connector)
> +{
> +	struct virtio_gpu_output *virtio_gpu_output =
> +		drm_connector_to_virtio_gpu_output(connector);
> +
> +	drm_connector_unregister(connector);
> +	drm_connector_cleanup(connector);
> +	kfree(virtio_gpu_output);
> +}
> +
> +static const struct drm_connector_funcs virtio_gpu_connector_funcs = {
> +	.dpms = drm_helper_connector_dpms,
> +	.save = virtio_gpu_conn_save,
> +	.restore = virtio_gpu_conn_restore,
> +	.detect = virtio_gpu_conn_detect,
> +	.fill_modes = drm_helper_probe_single_connector_modes,
> +	.set_property = virtio_gpu_conn_set_property,
> +	.destroy = virtio_gpu_conn_destroy,
> +};
> +
> +static const struct drm_encoder_funcs virtio_gpu_enc_funcs = {
> +	.destroy = drm_encoder_cleanup,
> +};
> +
> +static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
> +{
> +	struct drm_device *dev = vgdev->ddev;
> +	struct virtio_gpu_output *output = vgdev->outputs + index;
> +	struct drm_connector *connector = &output->conn;
> +	struct drm_encoder *encoder = &output->enc;
> +	struct drm_crtc *crtc = &output->crtc;
> +
> +	output->index = index;
> +	if (index == 0) {
> +		output->info.enabled = cpu_to_le32(true);
> +		output->info.r.width = cpu_to_le32(XRES_DEF);
> +		output->info.r.height = cpu_to_le32(YRES_DEF);
> +	}
> +
> +	drm_crtc_init(dev, crtc, &virtio_gpu_crtc_funcs);
> +	drm_mode_crtc_set_gamma_size(crtc, 256);
> +	drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
> +
> +	drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
> +			   DRM_MODE_CONNECTOR_VIRTUAL);
> +	connector->polled = DRM_CONNECTOR_POLL_HPD;
> +	drm_encoder_init(dev, encoder, &virtio_gpu_enc_funcs,
> +			 DRM_MODE_ENCODER_VIRTUAL);
> +
> +	encoder->possible_crtcs = 1 << index;
> +	drm_mode_connector_attach_encoder(connector, encoder);
> +	drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs);
> +	drm_connector_helper_add(connector, &virtio_gpu_conn_helper_funcs);
> +	drm_connector_register(connector);
> +	return 0;
> +}
> +
> +static struct drm_framebuffer *
> +virtio_gpu_user_framebuffer_create(struct drm_device *dev,
> +				   struct drm_file *file_priv,
> +				   struct drm_mode_fb_cmd2 *mode_cmd)
> +{
> +	struct drm_gem_object *obj = NULL;
> +	struct virtio_gpu_framebuffer *virtio_gpu_fb;
> +	int ret;
> +
> +	/* lookup object associated with res handle */
> +	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
> +	if (!obj)
> +		return ERR_PTR(-EINVAL);
> +
> +	virtio_gpu_fb = kzalloc(sizeof(*virtio_gpu_fb), GFP_KERNEL);
> +	if (virtio_gpu_fb == NULL)
> +		return ERR_PTR(-ENOMEM);
> +
> +	ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
> +	if (ret) {
> +		kfree(virtio_gpu_fb);
> +		if (obj)
> +			drm_gem_object_unreference_unlocked(obj);
> +		return ERR_PTR(ret);
> +	}
> +
> +	return &virtio_gpu_fb->base;
> +}
> +
> +static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = {
> +	.fb_create = virtio_gpu_user_framebuffer_create,
> +};
> +
> +int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
> +{
> +	int i;
> +	int ret;
> +
> +	drm_mode_config_init(vgdev->ddev);
> +	vgdev->ddev->mode_config.funcs = (void *)&virtio_gpu_mode_funcs;
> +
> +	/* modes will be validated against the framebuffer size */
> +	vgdev->ddev->mode_config.min_width = XRES_MIN;
> +	vgdev->ddev->mode_config.min_height = YRES_MIN;
> +	vgdev->ddev->mode_config.max_width = XRES_MAX;
> +	vgdev->ddev->mode_config.max_height = YRES_MAX;
> +
> +	for (i = 0 ; i < vgdev->num_scanouts; ++i)
> +		vgdev_output_init(vgdev, i);
> +
> +	/* primary surface must be created by this point, to allow
> +	 * issuing command queue commands and having them read by
> +	 * spice server. */
> +	ret = virtio_gpu_fbdev_init(vgdev);
> +	if (ret)
> +		return ret;
> +
> +	ret = drm_vblank_init(vgdev->ddev, vgdev->num_scanouts);
> +
> +	drm_kms_helper_poll_init(vgdev->ddev);
> +	return ret;
> +}
> +
> +void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
> +{
> +	virtio_gpu_fbdev_fini(vgdev);
> +	drm_mode_config_cleanup(vgdev->ddev);
> +}
> diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
> new file mode 100644
> index 0000000..e4b50af
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
> @@ -0,0 +1,68 @@
> +#include <linux/pci.h>
> +
> +#include "virtgpu_drv.h"
> +
> +int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master)
> +{
> +	struct pci_dev *pdev = dev->pdev;
> +
> +	if (pdev) {
> +		return drm_pci_set_busid(dev, master);
> +	}
> +	return 0;
> +}
> +
> +static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev)
> +{
> +	struct apertures_struct *ap;
> +	bool primary;
> +	ap = alloc_apertures(1);
> +	if (!ap)
> +		return;
> +
> +	ap->ranges[0].base = pci_resource_start(pci_dev, 2);
> +	ap->ranges[0].size = pci_resource_len(pci_dev, 2);
> +
> +	primary = pci_dev->resource[PCI_ROM_RESOURCE].flags
> +		& IORESOURCE_ROM_SHADOW;
> +
> +	remove_conflicting_framebuffers(ap, "virtiodrmfb", primary);
> +
> +	kfree(ap);
> +}
> +
> +int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
> +{
> +	struct drm_device *dev;
> +	int ret;
> +
> +	dev = drm_dev_alloc(driver, &vdev->dev);
> +	if (!dev)
> +		return -ENOMEM;
> +	dev->virtdev = vdev;
> +	vdev->priv = dev;
> +
> +	if (strcmp(vdev->dev.parent->bus->name, "pci") == 0) {
> +		struct pci_dev *pdev = to_pci_dev(vdev->dev.parent);
> +		bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
> +		DRM_INFO("pci: %s detected\n",
> +			 vga ? "virtio-vga" : "virtio-gpu-pci");
> +		dev->pdev = pdev;
> +		if (vga)
> +			virtio_pci_kick_out_firmware_fb(pdev);
> +	}
> +
> +	ret = drm_dev_register(dev, 0);
> +	if (ret)
> +		goto err_free;
> +
> +	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
> +		 driver->major, driver->minor, driver->patchlevel,
> +		 driver->date, dev->primary->index);
> +
> +	return 0;
> +
> +err_free:
> +	drm_dev_unref(dev);
> +	return ret;
> +}
> diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
> new file mode 100644
> index 0000000..3662e86
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
> @@ -0,0 +1,132 @@
> +/*
> + * Copyright 2011 Red Hat, Inc.
> + * All Rights Reserved.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the next
> + * paragraph) shall be included in all copies or substantial portions of the
> + * Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> + * OTHER DEALINGS IN THE SOFTWARE.
> + *
> + * Authors:
> + *    Dave Airlie <airlied@redhat.com>
> + */
> +
> +#include <linux/module.h>
> +#include <linux/console.h>
> +#include <linux/pci.h>
> +#include "drmP.h"
> +#include "drm/drm.h"
> +
> +#include "virtgpu_drv.h"
> +static struct drm_driver driver;
> +
> +static int virtio_gpu_modeset = -1;
> +
> +MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
> +module_param_named(modeset, virtio_gpu_modeset, int, 0400);
> +
> +static int virtio_gpu_probe(struct virtio_device *vdev)
> +{
> +#ifdef CONFIG_VGA_CONSOLE
> +	if (vgacon_text_force() && virtio_gpu_modeset == -1)
> +		return -EINVAL;
> +#endif
> +
> +	if (virtio_gpu_modeset == 0)
> +		return -EINVAL;
> +
> +	return drm_virtio_init(&driver, vdev);
> +}
> +
> +static void virtio_gpu_remove(struct virtio_device *vdev)
> +{
> +	struct drm_device *dev = vdev->priv;
> +	drm_put_dev(dev);
> +}
> +
> +static void virtio_gpu_config_changed(struct virtio_device *vdev)
> +{
> +	struct drm_device *dev = vdev->priv;
> +	struct virtio_gpu_device *vgdev = dev->dev_private;
> +
> +	schedule_work(&vgdev->config_changed_work);
> +}
> +
> +static struct virtio_device_id id_table[] = {
> +	{ VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID },
> +	{ 0 },
> +};
> +
> +static unsigned int features[] = {
> +};
> +static struct virtio_driver virtio_gpu_driver = {
> +	.feature_table = features,
> +	.feature_table_size = ARRAY_SIZE(features),
> +	.driver.name = KBUILD_MODNAME,
> +	.driver.owner = THIS_MODULE,
> +	.id_table = id_table,
> +	.probe = virtio_gpu_probe,
> +	.remove = virtio_gpu_remove,
> +	.config_changed = virtio_gpu_config_changed
> +};
> +
> +module_virtio_driver(virtio_gpu_driver);
> +
> +MODULE_DEVICE_TABLE(virtio, id_table);
> +MODULE_DESCRIPTION("Virtio GPU driver");
> +MODULE_LICENSE("GPL");
> +
> +static const struct file_operations virtio_gpu_driver_fops = {
> +	.owner = THIS_MODULE,
> +	.open = drm_open,
> +	.mmap = virtio_gpu_mmap,
> +	.poll = drm_poll,
> +	.read = drm_read,
> +	.unlocked_ioctl	= drm_ioctl,
> +	.release = drm_release,
> +#ifdef CONFIG_COMPAT
> +	.compat_ioctl = drm_compat_ioctl,
> +#endif
> +	.llseek = noop_llseek,
> +};
> +
> +
> +static struct drm_driver driver = {
> +	.driver_features = DRIVER_MODESET | DRIVER_GEM,
> +	.set_busid = drm_virtio_set_busid,
> +	.load = virtio_gpu_driver_load,
> +	.unload = virtio_gpu_driver_unload,
> +
> +	.dumb_create = virtio_gpu_mode_dumb_create,
> +	.dumb_map_offset = virtio_gpu_mode_dumb_mmap,
> +	.dumb_destroy = virtio_gpu_mode_dumb_destroy,
> +
> +#if defined(CONFIG_DEBUG_FS)
> +	.debugfs_init = virtio_gpu_debugfs_init,
> +	.debugfs_cleanup = virtio_gpu_debugfs_takedown,
> +#endif
> +
> +	.gem_free_object = virtio_gpu_gem_free_object,
> +	.fops = &virtio_gpu_driver_fops,
> +
> +	.name = DRIVER_NAME,
> +	.desc = DRIVER_DESC,
> +	.date = DRIVER_DATE,
> +	.major = DRIVER_MAJOR,
> +	.minor = DRIVER_MINOR,
> +	.patchlevel = DRIVER_PATCHLEVEL,
> +};
> diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
> new file mode 100644
> index 0000000..6082ec3
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
> @@ -0,0 +1,326 @@
> +/*
> + * Copyright (C) 2012 Red Hat
> + *
> + * This file is subject to the terms and conditions of the GNU General Public
> + * License v2. See the file COPYING in the main directory of this archive for
> + * more details.
> + */
> +
> +#ifndef VIRTIO_DRV_H
> +#define VIRTIO_DRV_H
> +
> +#include <linux/virtio.h>
> +#include <linux/virtio_ids.h>
> +#include <linux/virtio_config.h>
> +#include <linux/virtio_gpu.h>
> +
> +#include <drm/drmP.h>
> +#include <drm/drm_gem.h>
> +#include <drm/drm_crtc_helper.h>
> +#include <ttm/ttm_bo_api.h>
> +#include <ttm/ttm_bo_driver.h>
> +#include <ttm/ttm_placement.h>
> +#include <ttm/ttm_module.h>
> +
> +#define DRIVER_NAME "virtio_gpu"
> +#define DRIVER_DESC "virtio GPU"
> +#define DRIVER_DATE "0"
> +
> +#define DRIVER_MAJOR 0
> +#define DRIVER_MINOR 0
> +#define DRIVER_PATCHLEVEL 1
> +
> +/* virtgpu_drm_bus.c */
> +int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master);
> +int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
> +
> +struct virtio_gpu_object {
> +	struct drm_gem_object gem_base;
> +	uint32_t hw_res_handle;
> +
> +	struct sg_table *pages;
> +	void *vmap;
> +	bool dumb;
> +	struct ttm_place                placement_code;
> +	struct ttm_placement		placement;
> +	struct ttm_buffer_object	tbo;
> +	struct ttm_bo_kmap_obj		kmap;
> +};
> +#define gem_to_virtio_gpu_obj(gobj) \
> +	container_of((gobj), struct virtio_gpu_object, gem_base)
> +
> +struct virtio_gpu_vbuffer;
> +struct virtio_gpu_device;
> +
> +typedef void (*virtio_gpu_resp_cb)(struct virtio_gpu_device *vgdev,
> +				   struct virtio_gpu_vbuffer *vbuf);
> +
> +struct virtio_gpu_fence_driver {
> +	atomic64_t       last_seq;
> +	uint64_t         sync_seq;
> +	struct list_head fences;
> +	spinlock_t       lock;
> +};
> +
> +struct virtio_gpu_fence {
> +	struct fence f;
> +	struct virtio_gpu_fence_driver *drv;
> +	struct list_head node;
> +	uint64_t seq;
> +};
> +#define to_virtio_fence(x) \
> +	container_of(x, struct virtio_gpu_fence, f)
> +
> +struct virtio_gpu_vbuffer {
> +	char *buf;
> +	int size;
> +	bool debug_dump_sglists;
> +
> +	void *data_buf;
> +	uint32_t data_size;
> +
> +	char *resp_buf;
> +	int resp_size;
> +
> +	virtio_gpu_resp_cb resp_cb;
> +
> +	struct list_head destroy_list;
> +};
> +
> +struct virtio_gpu_output {
> +	int index;
> +	struct drm_crtc crtc;
> +	struct drm_connector conn;
> +	struct drm_encoder enc;
> +	struct virtio_gpu_display_one info;
> +	struct virtio_gpu_update_cursor cursor;
> +	int cur_x;
> +	int cur_y;
> +};
> +#define drm_crtc_to_virtio_gpu_output(x) \
> +	container_of(x, struct virtio_gpu_output, crtc)
> +#define drm_connector_to_virtio_gpu_output(x) \
> +	container_of(x, struct virtio_gpu_output, conn)
> +#define drm_encoder_to_virtio_gpu_output(x) \
> +	container_of(x, struct virtio_gpu_output, enc)
> +
> +struct virtio_gpu_framebuffer {
> +	struct drm_framebuffer base;
> +	struct drm_gem_object *obj;
> +	int x1, y1, x2, y2; /* dirty rect */
> +	spinlock_t dirty_lock;
> +	uint32_t hw_res_handle;
> +};
> +#define to_virtio_gpu_framebuffer(x) \
> +	container_of(x, struct virtio_gpu_framebuffer, base)
> +
> +struct virtio_gpu_mman {
> +	struct ttm_bo_global_ref        bo_global_ref;
> +	struct drm_global_reference	mem_global_ref;
> +	bool				mem_global_referenced;
> +	struct ttm_bo_device		bdev;
> +};
> +
> +struct virtio_gpu_fbdev;
> +
> +struct virtio_gpu_queue {
> +	struct virtqueue *vq;
> +	spinlock_t qlock;
> +	wait_queue_head_t ack_queue;
> +	struct work_struct dequeue_work;
> +};
> +
> +struct virtio_gpu_device {
> +	struct device *dev;
> +	struct drm_device *ddev;
> +
> +	struct virtio_device *vdev;
> +
> +	struct virtio_gpu_mman mman;
> +
> +	/* pointer to fbdev info structure */
> +	struct virtio_gpu_fbdev *vgfbdev;
> +	struct virtio_gpu_output outputs[VIRTIO_GPU_MAX_SCANOUTS];
> +	uint32_t num_scanouts;
> +
> +	struct virtio_gpu_queue ctrlq;
> +	struct virtio_gpu_queue cursorq;
> +
> +	struct idr	resource_idr;
> +	spinlock_t resource_idr_lock;
> +
> +	wait_queue_head_t resp_wq;
> +	/* current display info */
> +	spinlock_t display_info_lock;
> +
> +	struct virtio_gpu_fence_driver fence_drv;
> +
> +	struct idr	ctx_id_idr;
> +	spinlock_t ctx_id_idr_lock;
> +
> +	struct work_struct config_changed_work;
> +};
> +
> +struct virtio_gpu_fpriv {
> +	uint32_t ctx_id;
> +};
> +
> +/* virtio_ioctl.c */
> +#define DRM_VIRTIO_NUM_IOCTLS 10
> +extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
> +
> +/* virtio_kms.c */
> +int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags);
> +int virtio_gpu_driver_unload(struct drm_device *dev);
> +
> +/* virtio_gem.c */
> +void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj);
> +int virtio_gpu_gem_init(struct virtio_gpu_device *vgdev);
> +void virtio_gpu_gem_fini(struct virtio_gpu_device *vgdev);
> +int virtio_gpu_gem_create(struct drm_file *file,
> +			  struct drm_device *dev,
> +			  uint64_t size,
> +			  struct drm_gem_object **obj_p,
> +			  uint32_t *handle_p);
> +struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
> +						  size_t size, bool kernel,
> +						  bool pinned);
> +int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
> +				struct drm_device *dev,
> +				struct drm_mode_create_dumb *args);
> +int virtio_gpu_mode_dumb_destroy(struct drm_file *file_priv,
> +				 struct drm_device *dev,
> +				 uint32_t handle);
> +int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
> +			      struct drm_device *dev,
> +			      uint32_t handle, uint64_t *offset_p);
> +
> +/* virtio_fb */
> +#define VIRTIO_GPUFB_CONN_LIMIT 1
> +int virtio_gpu_fbdev_init(struct virtio_gpu_device *vgdev);
> +void virtio_gpu_fbdev_fini(struct virtio_gpu_device *vgdev);
> +int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *qfb,
> +			     struct drm_clip_rect *clips,
> +			     unsigned num_clips);
> +/* virtio vg */
> +int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
> +			       uint32_t *resid);
> +void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id);
> +int virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
> +				   uint32_t resource_id,
> +				   uint32_t format,
> +				   uint32_t width,
> +				   uint32_t height);
> +int virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
> +				  uint32_t resource_id);
> +int virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
> +				       uint32_t resource_id, uint64_t offset,
> +				       __le32 width, __le32 height,
> +				       __le32 x, __le32 y,
> +				       struct virtio_gpu_fence **fence);
> +int virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
> +				  uint32_t resource_id,
> +				  uint32_t x, uint32_t y,
> +				  uint32_t width, uint32_t height);
> +int virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
> +			       uint32_t scanout_id, uint32_t resource_id,
> +			       uint32_t width, uint32_t height,
> +			       uint32_t x, uint32_t y);
> +int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
> +			     struct virtio_gpu_object *obj,
> +			     uint32_t resource_id,
> +			     struct virtio_gpu_fence **fence);
> +int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
> +int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
> +void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
> +			    struct virtio_gpu_output *output);
> +int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev);
> +int virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
> +					  uint32_t resource_id);
> +void virtio_gpu_ctrl_ack(struct virtqueue *vq);
> +void virtio_gpu_cursor_ack(struct virtqueue *vq);
> +void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
> +void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
> +
> +/* virtio_gpu_display.c */
> +int virtio_gpu_framebuffer_init(struct drm_device *dev,
> +				struct virtio_gpu_framebuffer *vgfb,
> +				struct drm_mode_fb_cmd2 *mode_cmd,
> +				struct drm_gem_object *obj);
> +int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
> +void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
> +
> +/* virtio_gpu_ttm.c */
> +int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev);
> +void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
> +bool virtio_gpu_ttm_bo_is_virtio_gpu_object(struct ttm_buffer_object *bo);
> +int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
> +
> +/* virtio_gpu_fence.c */
> +int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
> +			  struct virtio_gpu_ctrl_hdr *cmd_hdr,
> +			  struct virtio_gpu_fence **fence);
> +void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
> +				    u64 last_seq);
> +
> +/* virtio_gpu_object */
> +int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
> +			     unsigned long size, bool kernel, bool pinned,
> +			     struct virtio_gpu_object **bo_ptr);
> +int virtio_gpu_object_kmap(struct virtio_gpu_object *bo, void **ptr);
> +int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
> +				   struct virtio_gpu_object *bo);
> +void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo);
> +int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
> +
> +static inline struct virtio_gpu_object*
> +virtio_gpu_object_ref(struct virtio_gpu_object *bo)
> +{
> +	ttm_bo_reference(&bo->tbo);
> +	return bo;
> +}
> +
> +static inline void virtio_gpu_object_unref(struct virtio_gpu_object **bo)
> +{
> +	struct ttm_buffer_object *tbo;
> +
> +	if ((*bo) == NULL)
> +		return;
> +	tbo = &((*bo)->tbo);
> +	ttm_bo_unref(&tbo);
> +	if (tbo == NULL)
> +		*bo = NULL;
> +}
> +
> +static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo)
> +{
> +	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
> +}
> +
> +static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo,
> +					 bool no_wait)
> +{
> +	int r;
> +
> +	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
> +	if (unlikely(r != 0)) {
> +		if (r != -ERESTARTSYS) {
> +			struct virtio_gpu_device *qdev =
> +				bo->gem_base.dev->dev_private;
> +			dev_err(qdev->dev, "%p reserve failed\n", bo);
> +		}
> +		return r;
> +	}
> +	return 0;
> +}
> +
> +static inline void virtio_gpu_object_unreserve(struct virtio_gpu_object *bo)
> +{
> +	ttm_bo_unreserve(&bo->tbo);
> +}
> +
> +/* virgl debugfs */
> +int virtio_gpu_debugfs_init(struct drm_minor *minor);
> +void virtio_gpu_debugfs_takedown(struct drm_minor *minor);
> +
> +#endif
> diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
> new file mode 100644
> index 0000000..1d79457
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
> @@ -0,0 +1,415 @@
> +#include <drm/drmP.h>
> +#include <drm/drm_fb_helper.h>
> +#include "virtgpu_drv.h"
> +
> +#define VIRTIO_GPU_FBCON_POLL_PERIOD (HZ / 60)
> +
> +struct virtio_gpu_fbdev {
> +	struct drm_fb_helper           helper;
> +	struct virtio_gpu_framebuffer  vgfb;
> +	struct list_head	       fbdev_list;
> +	struct virtio_gpu_device       *vgdev;
> +	struct delayed_work            work;
> +};
> +#define DL_ALIGN_UP(x, a) ALIGN(x, a)
> +#define DL_ALIGN_DOWN(x, a) ALIGN(x-(a-1), a)
> +
> +static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,
> +				   bool store, int x, int y,
> +				   int width, int height)
> +{
> +	struct drm_device *dev = fb->base.dev;
> +	struct virtio_gpu_device *vgdev = dev->dev_private;
> +	bool store_for_later = false;
> +	int aligned_x;
> +	int bpp = (fb->base.bits_per_pixel / 8);
> +	int x2, y2;
> +	unsigned long flags;
> +	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->obj);
> +
> +	aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
> +	width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
> +	x = aligned_x;
> +
> +	if ((width <= 0) ||
> +	    (x + width > fb->base.width) ||
> +	    (y + height > fb->base.height)) {
> +		DRM_DEBUG("values out of range %dx%d+%d+%d, fb %dx%d\n",
> +			  width, height, x, y,
> +			  fb->base.width, fb->base.height);
> +		return -EINVAL;
> +	}
> +
> +	/* if we are in atomic context, just store the dirty info;
> +	   we can't test for that inside the spinlock */
> +	if (in_atomic() || store)
> +		store_for_later = true;
> +
> +	x2 = x + width - 1;
> +	y2 = y + height - 1;
> +
> +	spin_lock_irqsave(&fb->dirty_lock, flags);
> +
> +	if (fb->y1 < y)
> +		y = fb->y1;
> +	if (fb->y2 > y2)
> +		y2 = fb->y2;
> +	if (fb->x1 < x)
> +		x = fb->x1;
> +	if (fb->x2 > x2)
> +		x2 = fb->x2;
> +
> +	if (store_for_later) {
> +		fb->x1 = x;
> +		fb->x2 = x2;
> +		fb->y1 = y;
> +		fb->y2 = y2;
> +		spin_unlock_irqrestore(&fb->dirty_lock, flags);
> +		return 0;
> +	}
> +
> +	fb->x1 = fb->y1 = INT_MAX;
> +	fb->x2 = fb->y2 = 0;
> +
> +	spin_unlock_irqrestore(&fb->dirty_lock, flags);
> +
> +	{
> +		uint32_t offset;
> +		uint32_t w = x2 - x + 1;
> +		uint32_t h = y2 - y + 1;
> +
> +		offset = (y * fb->base.pitches[0]) + x * bpp;
> +
> +		virtio_gpu_cmd_transfer_to_host_2d(vgdev, obj->hw_res_handle,
> +						   offset,
> +						   cpu_to_le32(w),
> +						   cpu_to_le32(h),
> +						   cpu_to_le32(x),
> +						   cpu_to_le32(y),
> +						   NULL);
> +
> +	}
> +	virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle,
> +				      x, y, x2 - x + 1, y2 - y + 1);
> +	return 0;
> +}
> +
> +int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *vgfb,
> +			     struct drm_clip_rect *clips,
> +			     unsigned num_clips)
> +{
> +	struct virtio_gpu_device *vgdev = vgfb->base.dev->dev_private;
> +	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->obj);
> +	struct drm_clip_rect norect;
> +	struct drm_clip_rect *clips_ptr;
> +	int left, right, top, bottom;
> +	int i;
> +	int inc = 1;
> +	if (!num_clips) {
> +		num_clips = 1;
> +		clips = &norect;
> +		norect.x1 = norect.y1 = 0;
> +		norect.x2 = vgfb->base.width;
> +		norect.y2 = vgfb->base.height;
> +	}
> +	left = clips->x1;
> +	right = clips->x2;
> +	top = clips->y1;
> +	bottom = clips->y2;
> +
> +	/* skip the first clip rect */
> +	for (i = 1, clips_ptr = clips + inc;
> +	     i < num_clips; i++, clips_ptr += inc) {
> +		left = min_t(int, left, (int)clips_ptr->x1);
> +		right = max_t(int, right, (int)clips_ptr->x2);
> +		top = min_t(int, top, (int)clips_ptr->y1);
> +		bottom = max_t(int, bottom, (int)clips_ptr->y2);
> +	}
> +
> +	if (obj->dumb)
> +		return virtio_gpu_dirty_update(vgfb, false, left, top,
> +					       right - left, bottom - top);
> +
> +	virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle,
> +				      left, top, right - left, bottom - top);
> +	return 0;
> +}
> +
> +static void virtio_gpu_fb_dirty_work(struct work_struct *work)
> +{
> +	struct delayed_work *delayed_work = to_delayed_work(work);
> +	struct virtio_gpu_fbdev *vfbdev =
> +		container_of(delayed_work, struct virtio_gpu_fbdev, work);
> +	struct virtio_gpu_framebuffer *vgfb = &vfbdev->vgfb;
> +
> +	virtio_gpu_dirty_update(&vfbdev->vgfb, false, vgfb->x1, vgfb->y1,
> +				vgfb->x2 - vgfb->x1, vgfb->y2 - vgfb->y1);
> +}
> +
> +static void virtio_gpu_3d_fillrect(struct fb_info *info,
> +				   const struct fb_fillrect *rect)
> +{
> +	struct virtio_gpu_fbdev *vfbdev = info->par;
> +	sys_fillrect(info, rect);
> +	virtio_gpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy,
> +			     rect->width, rect->height);
> +	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
> +}
> +
> +static void virtio_gpu_3d_copyarea(struct fb_info *info,
> +				   const struct fb_copyarea *area)
> +{
> +	struct virtio_gpu_fbdev *vfbdev = info->par;
> +	sys_copyarea(info, area);
> +	virtio_gpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy,
> +			   area->width, area->height);
> +	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
> +}
> +
> +static void virtio_gpu_3d_imageblit(struct fb_info *info,
> +				    const struct fb_image *image)
> +{
> +	struct virtio_gpu_fbdev *vfbdev = info->par;
> +	sys_imageblit(info, image);
> +	virtio_gpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy,
> +			     image->width, image->height);
> +	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
> +}
> +
> +static struct fb_ops virtio_gpufb_ops = {
> +	.owner = THIS_MODULE,
> +	.fb_check_var = drm_fb_helper_check_var,
> +	.fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
> +	.fb_fillrect = virtio_gpu_3d_fillrect,
> +	.fb_copyarea = virtio_gpu_3d_copyarea,
> +	.fb_imageblit = virtio_gpu_3d_imageblit,
> +	.fb_pan_display = drm_fb_helper_pan_display,
> +	.fb_blank = drm_fb_helper_blank,
> +	.fb_setcmap = drm_fb_helper_setcmap,
> +	.fb_debug_enter = drm_fb_helper_debug_enter,
> +	.fb_debug_leave = drm_fb_helper_debug_leave,
> +};
> +
> +static int virtio_gpu_vmap_fb(struct virtio_gpu_device *vgdev,
> +			      struct virtio_gpu_object *obj)
> +{
> +	return virtio_gpu_object_kmap(obj, NULL);
> +}
> +
> +static int virtio_gpufb_create(struct drm_fb_helper *helper,
> +			       struct drm_fb_helper_surface_size *sizes)
> +{
> +	struct virtio_gpu_fbdev *vfbdev =
> +		container_of(helper, struct virtio_gpu_fbdev, helper);
> +	struct drm_device *dev = helper->dev;
> +	struct virtio_gpu_device *vgdev = dev->dev_private;
> +	struct fb_info *info;
> +	struct drm_framebuffer *fb;
> +	struct drm_mode_fb_cmd2 mode_cmd = {};
> +	struct virtio_gpu_object *obj;
> +	struct device *device = vgdev->dev;
> +	uint32_t resid, format, size;
> +	int ret;
> +
> +	if (sizes->surface_bpp == 24)
> +		sizes->surface_bpp = 32;
> +	mode_cmd.width = sizes->surface_width;
> +	mode_cmd.height = sizes->surface_height;
> +	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
> +	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
> +							  sizes->surface_depth);
> +
> +	switch (mode_cmd.pixel_format) {
> +#ifdef __BIG_ENDIAN
> +	case DRM_FORMAT_XRGB8888:
> +		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
> +		break;
> +	case DRM_FORMAT_ARGB8888:
> +		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
> +		break;
> +	case DRM_FORMAT_BGRX8888:
> +		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
> +		break;
> +	case DRM_FORMAT_BGRA8888:
> +		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
> +		break;
> +	case DRM_FORMAT_RGBX8888:
> +		format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
> +		break;
> +	case DRM_FORMAT_RGBA8888:
> +		format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
> +		break;
> +	case DRM_FORMAT_XBGR8888:
> +		format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
> +		break;
> +	case DRM_FORMAT_ABGR8888:
> +		format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
> +		break;
> +#else
> +	case DRM_FORMAT_XRGB8888:
> +		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
> +		break;
> +	case DRM_FORMAT_ARGB8888:
> +		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
> +		break;
> +	case DRM_FORMAT_BGRX8888:
> +		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
> +		break;
> +	case DRM_FORMAT_BGRA8888:
> +		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
> +		break;
> +	case DRM_FORMAT_RGBX8888:
> +		format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
> +		break;
> +	case DRM_FORMAT_RGBA8888:
> +		format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
> +		break;
> +	case DRM_FORMAT_XBGR8888:
> +		format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
> +		break;
> +	case DRM_FORMAT_ABGR8888:
> +		format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
> +		break;
> +#endif
> +	default:
> +		format = 0;
> +		break;
> +	}
> +	if (format == 0) {
> +		ret = -EINVAL;
> +		DRM_ERROR("failed to find virtio gpu format for %d\n",
> +			  mode_cmd.pixel_format);
> +		goto fail;
> +	}
> +
> +	size = mode_cmd.pitches[0] * mode_cmd.height;
> +	obj = virtio_gpu_alloc_object(dev, size, false, true);
> +	if (!obj) {
> +		ret = -ENOMEM;
> +		goto fail;
> +	}
> +
> +	ret = virtio_gpu_resource_id_get(vgdev, &resid);
> +	if (ret)
> +		goto fail;
> +
> +	ret = virtio_gpu_cmd_create_resource(vgdev, resid, format,
> +					  mode_cmd.width, mode_cmd.height);
> +	if (ret)
> +		goto fail;
> +
> +	ret = virtio_gpu_vmap_fb(vgdev, obj);
> +	if (ret) {
> +		DRM_ERROR("failed to vmap fb %d\n", ret);
> +		goto fail;
> +	}
> +
> +	/* attach the object to the resource */
> +	ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL);
> +	if (ret)
> +		goto fail;
> +
> +	info = framebuffer_alloc(0, device);
> +	if (!info) {
> +		ret = -ENOMEM;
> +		goto fail;
> +	}
> +
> +	info->par = helper;
> +
> +	ret = virtio_gpu_framebuffer_init(dev, &vfbdev->vgfb,
> +				       &mode_cmd, &obj->gem_base);
> +	if (ret)
> +		goto fail;
> +
> +	fb = &vfbdev->vgfb.base;
> +
> +	vfbdev->helper.fb = fb;
> +	vfbdev->helper.fbdev = info;
> +
> +	strcpy(info->fix.id, "virtiodrmfb");
> +	info->flags = FBINFO_DEFAULT;
> +	info->fbops = &virtio_gpufb_ops;
> +	info->pixmap.flags = FB_PIXMAP_SYSTEM;
> +	ret = fb_alloc_cmap(&info->cmap, 256, 0);
> +	if (ret) {
> +		ret = -ENOMEM;
> +		goto fail;
> +	}
> +
> +	info->screen_base = obj->vmap;
> +	info->screen_size = obj->gem_base.size;
> +	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
> +	drm_fb_helper_fill_var(info, &vfbdev->helper,
> +			       sizes->fb_width, sizes->fb_height);
> +
> +	info->fix.mmio_start = 0;
> +	info->fix.mmio_len = 0;
> +
> +	return 0;
> +fail:
> +	return ret;
> +}
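
A note on the error handling in virtio_gpufb_create(): everything jumps
to the single fail: label, so on a late failure the object, the
resource id and the fb_info all leak.  The usual staged-label pattern
would unwind in reverse allocation order, roughly like this (label
names and placement are just a sketch, and the host-side resource needs
an unref once it has been created):

	err_release_info:
		framebuffer_release(info);
	err_unref_resource:
		virtio_gpu_cmd_unref_resource(vgdev, resid);
		virtio_gpu_resource_id_put(vgdev, resid);
	err_unref_obj:
		virtio_gpu_object_unref(&obj);
	err:
		return ret;
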
> +
> +static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
> +				    struct virtio_gpu_fbdev *vgfbdev)
> +{
> +	struct fb_info *info;
> +	struct virtio_gpu_framebuffer *vgfb = &vgfbdev->vgfb;
> +
> +	if (vgfbdev->helper.fbdev) {
> +		info = vgfbdev->helper.fbdev;
> +
> +		unregister_framebuffer(info);
> +		framebuffer_release(info);
> +	}
> +	vgfb->obj = NULL;
> +	drm_fb_helper_fini(&vgfbdev->helper);
> +	drm_framebuffer_cleanup(&vgfb->base);
> +
> +	return 0;
> +}
> +
> +static const struct drm_fb_helper_funcs virtio_gpu_fb_helper_funcs = {
> +	.fb_probe = virtio_gpufb_create,
> +};
> +
> +int virtio_gpu_fbdev_init(struct virtio_gpu_device *vgdev)
> +{
> +	struct virtio_gpu_fbdev *vgfbdev;
> +	int bpp_sel = 32; /* TODO: parameter from somewhere? */
> +	int ret;
> +
> +	vgfbdev = kzalloc(sizeof(struct virtio_gpu_fbdev), GFP_KERNEL);
> +	if (!vgfbdev)
> +		return -ENOMEM;
> +
> +	vgfbdev->vgdev = vgdev;
> +	vgdev->vgfbdev = vgfbdev;
> +	INIT_DELAYED_WORK(&vgfbdev->work, virtio_gpu_fb_dirty_work);
> +
> +	drm_fb_helper_prepare(vgdev->ddev, &vgfbdev->helper,
> +			      &virtio_gpu_fb_helper_funcs);
> +	ret = drm_fb_helper_init(vgdev->ddev, &vgfbdev->helper,
> +				 vgdev->num_scanouts,
> +				 VIRTIO_GPUFB_CONN_LIMIT);
> +	if (ret) {
> +		kfree(vgfbdev);
> +		return ret;
> +	}
> +
> +	drm_fb_helper_single_add_all_connectors(&vgfbdev->helper);
> +	drm_fb_helper_initial_config(&vgfbdev->helper, bpp_sel);
> +	return 0;
> +}
> +
> +void virtio_gpu_fbdev_fini(struct virtio_gpu_device *vgdev)
> +{
> +	if (!vgdev->vgfbdev)
> +		return;
> +
> +	virtio_gpu_fbdev_destroy(vgdev->ddev, vgdev->vgfbdev);
> +	kfree(vgdev->vgfbdev);
> +	vgdev->vgfbdev = NULL;
> +}
> diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
> new file mode 100644
> index 0000000..552aa49
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
> @@ -0,0 +1,95 @@
> +#include <drm/drmP.h>
> +#include "virtgpu_drv.h"
> +
> +static const char *virtio_get_driver_name(struct fence *f)
> +{
> +	return "virtio_gpu";
> +}
> +
> +static const char *virtio_get_timeline_name(struct fence *f)
> +{
> +	return "controlq";
> +}
> +
> +static bool virtio_enable_signaling(struct fence *f)
> +{
> +	return true;
> +}
> +
> +static bool virtio_signaled(struct fence *f)
> +{
> +	struct virtio_gpu_fence *fence = to_virtio_fence(f);
> +
> +	return atomic64_read(&fence->drv->last_seq) >= fence->seq;
> +}
> +
> +static void virtio_fence_value_str(struct fence *f, char *str, int size)
> +{
> +	struct virtio_gpu_fence *fence = to_virtio_fence(f);
> +
> +	snprintf(str, size, "%llu", fence->seq);
> +}
> +
> +static void virtio_timeline_value_str(struct fence *f, char *str, int size)
> +{
> +	struct virtio_gpu_fence *fence = to_virtio_fence(f);
> +
> +	snprintf(str, size, "%llu",
> +		 (u64)atomic64_read(&fence->drv->last_seq));
> +}
> +
> +static const struct fence_ops virtio_fence_ops = {
> +	.get_driver_name     = virtio_get_driver_name,
> +	.get_timeline_name   = virtio_get_timeline_name,
> +	.enable_signaling    = virtio_enable_signaling,
> +	.signaled            = virtio_signaled,
> +	.wait                = fence_default_wait,
> +	.fence_value_str     = virtio_fence_value_str,
> +	.timeline_value_str  = virtio_timeline_value_str,
> +};
> +
> +int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
> +			  struct virtio_gpu_ctrl_hdr *cmd_hdr,
> +			  struct virtio_gpu_fence **fence)
> +{
> +	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
> +	unsigned long irq_flags;
> +
> +	*fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_KERNEL);
> +	if ((*fence) == NULL)
> +		return -ENOMEM;
> +
> +	spin_lock_irqsave(&drv->lock, irq_flags);
> +	(*fence)->drv = drv;
> +	(*fence)->seq = ++drv->sync_seq;
> +	fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
> +		   0, (*fence)->seq);
> +	fence_get(&(*fence)->f);
> +	list_add_tail(&(*fence)->node, &drv->fences);
> +	spin_unlock_irqrestore(&drv->lock, irq_flags);
> +
> +	cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
> +	cmd_hdr->fence_id = cpu_to_le64((*fence)->seq);
> +	return 0;
> +}
> +
> +void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
> +				    u64 last_seq)
> +{
> +	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
> +	struct virtio_gpu_fence *fence, *tmp;
> +	unsigned long irq_flags;
> +
> +	spin_lock_irqsave(&drv->lock, irq_flags);
> +	atomic64_set(&vgdev->fence_drv.last_seq, last_seq);
> +	list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
> +		if (last_seq < fence->seq)
> +			continue;
> +		fence_signal_locked(&fence->f);
> +		list_del(&fence->node);
> +		fence_put(&fence->f);
> +	}
> +	spin_unlock_irqrestore(&drv->lock, irq_flags);
> +}
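
For context on how these get consumed: since the driver plugs into the
common struct fence API, callers can use the generic helpers to block
on command completion.  A rough sketch of the usage on the transfer
path (argument values made up):

	struct virtio_gpu_fence *fence;

	virtio_gpu_cmd_transfer_to_host_2d(vgdev, resid, 0,
					   cpu_to_le32(width),
					   cpu_to_le32(height),
					   0, 0, &fence);
	/* interruptible wait until the host marks the fence done */
	fence_wait(&fence->f, true);
	fence_put(&fence->f);
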
> diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
> new file mode 100644
> index 0000000..8bc0a24
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
> @@ -0,0 +1,120 @@
> +
> +#include <drm/drmP.h>
> +#include "virtgpu_drv.h"
> +
> +void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj)
> +{
> +	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(gem_obj);
> +
> +	if (obj)
> +		virtio_gpu_object_unref(&obj);
> +}
> +
> +struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
> +						  size_t size, bool kernel,
> +						  bool pinned)
> +{
> +	struct virtio_gpu_device *vgdev = dev->dev_private;
> +	struct virtio_gpu_object *obj;
> +	int ret;
> +
> +	ret = virtio_gpu_object_create(vgdev, size, kernel, pinned, &obj);
> +	if (ret)
> +		return ERR_PTR(ret);
> +
> +	return obj;
> +}
> +
> +int virtio_gpu_gem_create(struct drm_file *file,
> +			  struct drm_device *dev,
> +			  uint64_t size,
> +			  struct drm_gem_object **obj_p,
> +			  uint32_t *handle_p)
> +{
> +	struct virtio_gpu_object *obj;
> +	int ret;
> +	u32 handle;
> +
> +	obj = virtio_gpu_alloc_object(dev, size, false, false);
> +	if (IS_ERR(obj))
> +		return PTR_ERR(obj);
> +
> +	ret = drm_gem_handle_create(file, &obj->gem_base, &handle);
> +	if (ret) {
> +		drm_gem_object_release(&obj->gem_base);
> +		return ret;
> +	}
> +
> +	*obj_p = &obj->gem_base;
> +
> +	/* drop reference from allocate - handle holds it now */
> +	drm_gem_object_unreference_unlocked(&obj->gem_base);
> +
> +	*handle_p = handle;
> +	return 0;
> +}
> +
> +int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
> +				struct drm_device *dev,
> +				struct drm_mode_create_dumb *args)
> +{
> +	struct virtio_gpu_device *vgdev = dev->dev_private;
> +	struct drm_gem_object *gobj;
> +	struct virtio_gpu_object *obj;
> +	int ret;
> +	uint32_t pitch;
> +	uint32_t resid;
> +
> +	pitch = args->width * ((args->bpp + 7) / 8);
> +	args->size = pitch * args->height;
> +	args->size = ALIGN(args->size, PAGE_SIZE);
> +
> +	ret = virtio_gpu_gem_create(file_priv, dev, args->size, &gobj,
> +				 &args->handle);
> +	if (ret)
> +		goto fail;
> +
> +	ret = virtio_gpu_resource_id_get(vgdev, &resid);
> +	if (ret)
> +		goto fail;
> +
> +	ret = virtio_gpu_cmd_create_resource(vgdev, resid,
> +					  VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM,
> +					  args->width, args->height);
> +	if (ret)
> +		goto fail;
> +
> +	/* attach the object to the resource */
> +	obj = gem_to_virtio_gpu_obj(gobj);
> +	ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL);
> +	if (ret)
> +		goto fail;
> +
> +	obj->dumb = true;
> +	args->pitch = pitch;
> +	return ret;
> +fail:
> +	return ret;
> +}
> +
> +int virtio_gpu_mode_dumb_destroy(struct drm_file *file_priv,
> +				 struct drm_device *dev,
> +				 uint32_t handle)
> +{
> +	return drm_gem_handle_delete(file_priv, handle);
> +}
> +
> +int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
> +			      struct drm_device *dev,
> +			      uint32_t handle, uint64_t *offset_p)
> +{
> +	struct drm_gem_object *gobj;
> +	struct virtio_gpu_object *obj;
> +
> +	BUG_ON(!offset_p);
> +	gobj = drm_gem_object_lookup(dev, file_priv, handle);
> +	if (gobj == NULL)
> +		return -ENOENT;
> +	obj = gem_to_virtio_gpu_obj(gobj);
> +	*offset_p = virtio_gpu_object_mmap_offset(obj);
> +	drm_gem_object_unreference_unlocked(gobj);
> +	return 0;
> +}
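
For reference, the offset produced by virtio_gpu_mode_dumb_mmap() is
what userspace feeds back into mmap() on the DRM fd.  The standard
dumb-buffer flow on the client side looks roughly like this (using
libdrm's drmIoctl(), error checks omitted):

	struct drm_mode_create_dumb creq = {
		.width = 1024, .height = 768, .bpp = 32,
	};
	struct drm_mode_map_dumb mreq = { 0 };
	void *ptr;

	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
	mreq.handle = creq.handle;
	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
	ptr = mmap(NULL, creq.size, PROT_READ | PROT_WRITE,
		   MAP_SHARED, fd, mreq.offset);
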
> diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
> new file mode 100644
> index 0000000..45c4beb
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
> @@ -0,0 +1,125 @@
> +#include <linux/virtio.h>
> +#include <linux/virtio_config.h>
> +#include <drm/drmP.h>
> +#include "virtgpu_drv.h"
> +
> +static void virtio_gpu_config_changed_work_func(struct work_struct *work)
> +{
> +	struct virtio_gpu_device *vgdev =
> +		container_of(work, struct virtio_gpu_device,
> +			     config_changed_work);
> +	u32 events_read, events_clear = 0;
> +
> +	/* read the config space */
> +	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
> +		     events_read, &events_read);
> +	if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
> +		virtio_gpu_cmd_get_display_info(vgdev);
> +		drm_helper_hpd_irq_event(vgdev->ddev);
> +		events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
> +	}
> +	virtio_cwrite(vgdev->vdev, struct virtio_gpu_config,
> +		      events_clear, &events_clear);
> +}
> +
> +static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
> +			       void (*work_func)(struct work_struct *work))
> +{
> +	spin_lock_init(&vgvq->qlock);
> +	init_waitqueue_head(&vgvq->ack_queue);
> +	INIT_WORK(&vgvq->dequeue_work, work_func);
> +}
> +
> +int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
> +{
> +	static vq_callback_t *callbacks[] = {
> +		virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
> +	};
> +	static const char *names[] = { "control", "cursor" };
> +
> +	struct virtio_gpu_device *vgdev;
> +	/* this will expand later */
> +	struct virtqueue *vqs[2];
> +	u32 num_scanouts;
> +	int ret;
> +
> +	if (!virtio_has_feature(dev->virtdev, VIRTIO_F_VERSION_1))
> +		return -ENODEV;
> +
> +	vgdev = kzalloc(sizeof(struct virtio_gpu_device), GFP_KERNEL);
> +	if (!vgdev)
> +		return -ENOMEM;
> +
> +	vgdev->ddev = dev;
> +	dev->dev_private = vgdev;
> +	vgdev->vdev = dev->virtdev;
> +	vgdev->dev = dev->dev;
> +
> +	spin_lock_init(&vgdev->display_info_lock);
> +	spin_lock_init(&vgdev->ctx_id_idr_lock);
> +	idr_init(&vgdev->ctx_id_idr);
> +	spin_lock_init(&vgdev->resource_idr_lock);
> +	idr_init(&vgdev->resource_idr);
> +	init_waitqueue_head(&vgdev->resp_wq);
> +	virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
> +	virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);
> +
> +	spin_lock_init(&vgdev->fence_drv.lock);
> +	INIT_LIST_HEAD(&vgdev->fence_drv.fences);
> +	INIT_WORK(&vgdev->config_changed_work,
> +		  virtio_gpu_config_changed_work_func);
> +
> +	ret = vgdev->vdev->config->find_vqs(vgdev->vdev, 2, vqs,
> +					    callbacks, names);
> +	if (ret) {
> +		DRM_ERROR("failed to find virt queues\n");
> +		goto err_vqs;
> +	}
> +	vgdev->ctrlq.vq = vqs[0];
> +	vgdev->cursorq.vq = vqs[1];
> +
> +	ret = virtio_gpu_ttm_init(vgdev);
> +	if (ret) {
> +		DRM_ERROR("failed to init ttm %d\n", ret);
> +		goto err_ttm;
> +	}
> +
> +	/* get display info */
> +	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
> +		     num_scanouts, &num_scanouts);
> +	vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
> +				    VIRTIO_GPU_MAX_SCANOUTS);
> +	if (!vgdev->num_scanouts) {
> +		DRM_ERROR("num_scanouts is zero\n");
> +		ret = -EINVAL;
> +		goto err_scanouts;
> +	}
> +
> +	ret = virtio_gpu_modeset_init(vgdev);
> +	if (ret)
> +		goto err_modeset;
> +
> +	virtio_device_ready(vgdev->vdev);
> +	virtio_gpu_cmd_get_display_info(vgdev);
> +	return 0;
> +
> +err_modeset:
> +err_scanouts:
> +	virtio_gpu_ttm_fini(vgdev);
> +err_ttm:
> +	vgdev->vdev->config->del_vqs(vgdev->vdev);
> +err_vqs:
> +	kfree(vgdev);
> +	return ret;
> +}
> +
> +int virtio_gpu_driver_unload(struct drm_device *dev)
> +{
> +	struct virtio_gpu_device *vgdev = dev->dev_private;
> +
> +	virtio_gpu_modeset_fini(vgdev);
> +	virtio_gpu_ttm_fini(vgdev);
> +	vgdev->vdev->config->del_vqs(vgdev->vdev);
> +	kfree(vgdev);
> +	return 0;
> +}
> diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
> new file mode 100644
> index 0000000..0d98ae4
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_object.c
> @@ -0,0 +1,174 @@
> +#include "virtgpu_drv.h"
> +
> +static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
> +{
> +	struct virtio_gpu_object *bo;
> +	struct virtio_gpu_device *vgdev;
> +
> +	bo = container_of(tbo, struct virtio_gpu_object, tbo);
> +	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
> +
> +	if (bo->hw_res_handle)
> +		virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
> +	if (bo->pages)
> +		virtio_gpu_object_free_sg_table(bo);
> +	drm_gem_object_release(&bo->gem_base);
> +	kfree(bo);
> +}
> +
> +bool virtio_gpu_ttm_bo_is_virtio_gpu_object(struct ttm_buffer_object *bo)
> +{
> +	if (bo->destroy == &virtio_gpu_ttm_bo_destroy)
> +		return true;
> +	return false;
> +}
> +
> +static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo,
> +					  bool pinned)
> +{
> +	u32 c = 1;
> +	u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
> +
> +	vgbo->placement.placement = &vgbo->placement_code;
> +	vgbo->placement.busy_placement = &vgbo->placement_code;
> +	vgbo->placement_code.fpfn = 0;
> +	vgbo->placement_code.lpfn = 0;
> +	vgbo->placement_code.flags =
> +		TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT | pflag;
> +	vgbo->placement.num_placement = c;
> +	vgbo->placement.num_busy_placement = c;
> +}
> +
> +int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
> +			     unsigned long size, bool kernel, bool pinned,
> +			     struct virtio_gpu_object **bo_ptr)
> +{
> +	struct virtio_gpu_object *bo;
> +	enum ttm_bo_type type;
> +	size_t acc_size;
> +	int r;
> +
> +	if (kernel)
> +		type = ttm_bo_type_kernel;
> +	else
> +		type = ttm_bo_type_device;
> +	*bo_ptr = NULL;
> +
> +	acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, size,
> +				       sizeof(struct virtio_gpu_object));
> +
> +	bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
> +	if (bo == NULL)
> +		return -ENOMEM;
> +	size = roundup(size, PAGE_SIZE);
> +	r = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
> +	if (unlikely(r)) {
> +		kfree(bo);
> +		return r;
> +	}
> +	bo->dumb = false;
> +
> +	virtio_gpu_init_ttm_placement(bo, pinned);
> +	r = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, size, type,
> +			&bo->placement, 0, !kernel, NULL, acc_size,
> +			NULL, NULL, &virtio_gpu_ttm_bo_destroy);
> +	if (unlikely(r != 0)) {
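> +		/* no kfree(bo) needed here: on failure ttm_bo_init() has
> +		 * already invoked virtio_gpu_ttm_bo_destroy() for us */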
> +		if (r != -ERESTARTSYS)
> +			dev_err(vgdev->dev,
> +				"object_init %d failed for (%lu)\n", r,
> +				size);
> +		return r;
> +	}
> +	*bo_ptr = bo;
> +	return 0;
> +}
> +
> +int virtio_gpu_object_kmap(struct virtio_gpu_object *bo, void **ptr)
> +{
> +	bool is_iomem;
> +	int r;
> +
> +	if (bo->vmap) {
> +		if (ptr)
> +			*ptr = bo->vmap;
> +		return 0;
> +	}
> +	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
> +	if (r)
> +		return r;
> +	bo->vmap = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
> +	if (ptr)
> +		*ptr = bo->vmap;
> +	return 0;
> +}
> +
> +#if 0
> +void virtio_gpu_object_force_delete(struct virtio_gpu_device *vgdev)
> +{
> +	struct virtio_gpu_object *bo, *n;
> +
> +
> +	dev_err(vgdev->dev, "Userspace still has active objects !\n");
> +	list_for_each_entry_safe(bo, n, &vgdev->gem.objects, list) {
> +		mutex_lock(&vgdev->ddev->struct_mutex);
> +		dev_err(vgdev->dev, "%p %p %lu %lu force free\n",
> +			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
> +			*((unsigned long *)&bo->gem_base.refcount));
> +		spin_lock(&vgdev->gem.lock);
> +		list_del_init(&bo->list);
> +		spin_unlock(&vgdev->gem.lock);
> +		/* this should unref the ttm bo */
> +		drm_gem_object_unreference(&bo->gem_base);
> +		mutex_unlock(&vgdev->ddev->struct_mutex);
> +	}
> +}
> +#endif
> +
> +int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
> +				   struct virtio_gpu_object *bo)
> +{
> +	int ret;
> +	struct page **pages = bo->tbo.ttm->pages;
> +	int nr_pages = bo->tbo.num_pages;
> +
> +	/* nothing to do if the sg table is already set up; how this
> +	 * interacts with swapped-out objects is still an open question */
> +	if (bo->pages)
> +		return 0;
> +
> +	if (bo->tbo.ttm->state == tt_unpopulated) {
> +		ret = bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm);
> +		if (ret)
> +			return ret;
> +	}
> +	bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
> +	if (!bo->pages)
> +		goto out;
> +
> +	ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
> +					nr_pages << PAGE_SHIFT, GFP_KERNEL);
> +	if (ret)
> +		goto out;
> +	return 0;
> +out:
> +	kfree(bo->pages);
> +	bo->pages = NULL;
> +	return -ENOMEM;
> +}
> +
> +void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo)
> +{
> +	sg_free_table(bo->pages);
> +	kfree(bo->pages);
> +	bo->pages = NULL;
> +}
> +
> +int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait)
> +{
> +	int r;
> +
> +	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
> +	if (unlikely(r != 0))
> +		return r;
> +	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
> +	ttm_bo_unreserve(&bo->tbo);
> +	return r;
> +}
> +
> diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
> new file mode 100644
> index 0000000..a6f22e0
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
> @@ -0,0 +1,451 @@
> +/*
> + * Copyright 2013 Red Hat Inc.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice shall be included in
> + * all copies or substantial portions of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> + * OTHER DEALINGS IN THE SOFTWARE.
> + *
> + * Authors: Dave Airlie
> + *          Alon Levy
> + */
> +
> +#include <ttm/ttm_bo_api.h>
> +#include <ttm/ttm_bo_driver.h>
> +#include <ttm/ttm_placement.h>
> +#include <ttm/ttm_page_alloc.h>
> +#include <ttm/ttm_module.h>
> +#include <drm/drmP.h>
> +#include <drm/drm.h>
> +#include "virtgpu_drv.h"
> +
> +#include <linux/delay.h>
> +
> +#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
> +
> +static struct
> +virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
> +{
> +	struct virtio_gpu_mman *mman;
> +	struct virtio_gpu_device *vgdev;
> +
> +	mman = container_of(bdev, struct virtio_gpu_mman, bdev);
> +	vgdev = container_of(mman, struct virtio_gpu_device, mman);
> +	return vgdev;
> +}
> +
> +static int virtio_gpu_ttm_mem_global_init(struct drm_global_reference *ref)
> +{
> +	return ttm_mem_global_init(ref->object);
> +}
> +
> +static void virtio_gpu_ttm_mem_global_release(struct drm_global_reference *ref)
> +{
> +	ttm_mem_global_release(ref->object);
> +}
> +
> +static int virtio_gpu_ttm_global_init(struct virtio_gpu_device *vgdev)
> +{
> +	struct drm_global_reference *global_ref;
> +	int r;
> +
> +	vgdev->mman.mem_global_referenced = false;
> +	global_ref = &vgdev->mman.mem_global_ref;
> +	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
> +	global_ref->size = sizeof(struct ttm_mem_global);
> +	global_ref->init = &virtio_gpu_ttm_mem_global_init;
> +	global_ref->release = &virtio_gpu_ttm_mem_global_release;
> +
> +	r = drm_global_item_ref(global_ref);
> +	if (r != 0) {
> +		DRM_ERROR("Failed setting up TTM memory accounting "
> +			  "subsystem.\n");
> +		return r;
> +	}
> +
> +	vgdev->mman.bo_global_ref.mem_glob =
> +		vgdev->mman.mem_global_ref.object;
> +	global_ref = &vgdev->mman.bo_global_ref.ref;
> +	global_ref->global_type = DRM_GLOBAL_TTM_BO;
> +	global_ref->size = sizeof(struct ttm_bo_global);
> +	global_ref->init = &ttm_bo_global_init;
> +	global_ref->release = &ttm_bo_global_release;
> +	r = drm_global_item_ref(global_ref);
> +	if (r != 0) {
> +		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
> +		drm_global_item_unref(&vgdev->mman.mem_global_ref);
> +		return r;
> +	}
> +
> +	vgdev->mman.mem_global_referenced = true;
> +	return 0;
> +}
> +
> +static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev)
> +{
> +	if (vgdev->mman.mem_global_referenced) {
> +		drm_global_item_unref(&vgdev->mman.bo_global_ref.ref);
> +		drm_global_item_unref(&vgdev->mman.mem_global_ref);
> +		vgdev->mman.mem_global_referenced = false;
> +	}
> +}
> +
> +static struct vm_operations_struct virtio_gpu_ttm_vm_ops;
> +static const struct vm_operations_struct *ttm_vm_ops;
> +
> +static int virtio_gpu_ttm_fault(struct vm_area_struct *vma,
> +				struct vm_fault *vmf)
> +{
> +	struct ttm_buffer_object *bo;
> +	struct virtio_gpu_device *vgdev;
> +	int r;
> +
> +	bo = (struct ttm_buffer_object *)vma->vm_private_data;
> +	if (bo == NULL)
> +		return VM_FAULT_NOPAGE;
> +	vgdev = virtio_gpu_get_vgdev(bo->bdev);
> +	r = ttm_vm_ops->fault(vma, vmf);
> +	return r;
> +}
> +
> +int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
> +{
> +	struct drm_file *file_priv;
> +	struct virtio_gpu_device *vgdev;
> +	int r;
> +
> +	file_priv = filp->private_data;
> +	vgdev = file_priv->minor->dev->dev_private;
> +	if (vgdev == NULL) {
> +		DRM_ERROR(
> +		 "filp->private_data->minor->dev->dev_private == NULL\n");
> +		return -EINVAL;
> +	}
> +	r = ttm_bo_mmap(filp, vma, &vgdev->mman.bdev);
> +	if (unlikely(r != 0))
> +		return r;
> +	if (unlikely(ttm_vm_ops == NULL)) {
> +		ttm_vm_ops = vma->vm_ops;
> +		virtio_gpu_ttm_vm_ops = *ttm_vm_ops;
> +		virtio_gpu_ttm_vm_ops.fault = &virtio_gpu_ttm_fault;
> +	}
> +	vma->vm_ops = &virtio_gpu_ttm_vm_ops;
> +	return 0;
> +}
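
The fault wrapper above only forwards to ttm_vm_ops->fault, and the
vgdev it looks up is never used (gcc will likely flag it as a
set-but-unused variable).  Unless this is a placeholder for the virgl
work, the whole indirection could go away and the mmap handler would
shrink to roughly:

	int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
	{
		struct drm_file *file_priv = filp->private_data;
		struct virtio_gpu_device *vgdev =
			file_priv->minor->dev->dev_private;

		return ttm_bo_mmap(filp, vma, &vgdev->mman.bdev);
	}
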
> +
> +static int virtio_gpu_invalidate_caches(struct ttm_bo_device *bdev,
> +					uint32_t flags)
> +{
> +	return 0;
> +}
> +
> +static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
> +			       struct ttm_buffer_object *bo,
> +			       const struct ttm_place *place,
> +			       struct ttm_mem_reg *mem)
> +{
> +	mem->mm_node = (void *)1;
> +	return 0;
> +}
> +
> +static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
> +				struct ttm_mem_reg *mem)
> +{
> +	mem->mm_node = NULL;
> +}
> +
> +static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
> +			   unsigned long p_size)
> +{
> +	return 0;
> +}
> +
> +static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
> +{
> +	return 0;
> +}
> +
> +static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
> +			     const char *prefix)
> +{
> +}
> +
> +static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = {
> +	ttm_bo_man_init,
> +	ttm_bo_man_takedown,
> +	ttm_bo_man_get_node,
> +	ttm_bo_man_put_node,
> +	ttm_bo_man_debug
> +};
> +
> +static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
> +				    struct ttm_mem_type_manager *man)
> +{
> +	struct virtio_gpu_device *vgdev;
> +
> +	vgdev = virtio_gpu_get_vgdev(bdev);
> +
> +	switch (type) {
> +	case TTM_PL_SYSTEM:
> +		/* System memory */
> +		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
> +		man->available_caching = TTM_PL_MASK_CACHING;
> +		man->default_caching = TTM_PL_FLAG_CACHED;
> +		break;
> +	case TTM_PL_TT:
> +		man->func = &virtio_gpu_bo_manager_func;
> +		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
> +		man->available_caching = TTM_PL_MASK_CACHING;
> +		man->default_caching = TTM_PL_FLAG_CACHED;
> +		break;
> +	default:
> +		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
> +		return -EINVAL;
> +	}
> +	return 0;
> +}
> +
> +static void virtio_gpu_evict_flags(struct ttm_buffer_object *bo,
> +				struct ttm_placement *placement)
> +{
> +	static struct ttm_place placements = {
> +		.fpfn  = 0,
> +		.lpfn  = 0,
> +		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM,
> +	};
> +
> +	placement->placement = &placements;
> +	placement->busy_placement = &placements;
> +	placement->num_placement = 1;
> +	placement->num_busy_placement = 1;
> +}
> +
> +static int virtio_gpu_verify_access(struct ttm_buffer_object *bo,
> +				    struct file *filp)
> +{
> +	return 0;
> +}
> +
> +static int virtio_gpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
> +					 struct ttm_mem_reg *mem)
> +{
> +	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
> +
> +	mem->bus.addr = NULL;
> +	mem->bus.offset = 0;
> +	mem->bus.size = mem->num_pages << PAGE_SHIFT;
> +	mem->bus.base = 0;
> +	mem->bus.is_iomem = false;
> +	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
> +		return -EINVAL;
> +	switch (mem->mem_type) {
> +	case TTM_PL_SYSTEM:
> +	case TTM_PL_TT:
> +		/* system memory */
> +		return 0;
> +	default:
> +		return -EINVAL;
> +	}
> +}
> +
> +static void virtio_gpu_ttm_io_mem_free(struct ttm_bo_device *bdev,
> +				       struct ttm_mem_reg *mem)
> +{
> +}
> +
> +/*
> + * TTM backend functions.
> + */
> +struct virtio_gpu_ttm_tt {
> +	struct ttm_dma_tt		ttm;
> +	struct virtio_gpu_device		*vgdev;
> +	u64				offset;
> +};
> +
> +static int virtio_gpu_ttm_backend_bind(struct ttm_tt *ttm,
> +				       struct ttm_mem_reg *bo_mem)
> +{
> +	struct virtio_gpu_ttm_tt *gtt = (void *)ttm;
> +
> +	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
> +	if (!ttm->num_pages) {
> +		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
> +		     ttm->num_pages, bo_mem, ttm);
> +	}
> +	/* Not implemented */
> +	return 0;
> +}
> +
> +static int virtio_gpu_ttm_backend_unbind(struct ttm_tt *ttm)
> +{
> +	/* Not implemented */
> +	return 0;
> +}
> +
> +static void virtio_gpu_ttm_backend_destroy(struct ttm_tt *ttm)
> +{
> +	struct virtio_gpu_ttm_tt *gtt = (void *)ttm;
> +
> +	ttm_dma_tt_fini(&gtt->ttm);
> +	kfree(gtt);
> +}
> +
> +static struct ttm_backend_func virtio_gpu_backend_func = {
> +	.bind = &virtio_gpu_ttm_backend_bind,
> +	.unbind = &virtio_gpu_ttm_backend_unbind,
> +	.destroy = &virtio_gpu_ttm_backend_destroy,
> +};
> +
> +static int virtio_gpu_ttm_tt_populate(struct ttm_tt *ttm)
> +{
> +	if (ttm->state != tt_unpopulated)
> +		return 0;
> +
> +	return ttm_pool_populate(ttm);
> +}
> +
> +static void virtio_gpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
> +{
> +	ttm_pool_unpopulate(ttm);
> +}
> +
> +static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_bo_device *bdev,
> +					       unsigned long size,
> +					       uint32_t page_flags,
> +					       struct page *dummy_read_page)
> +{
> +	struct virtio_gpu_device *vgdev;
> +	struct virtio_gpu_ttm_tt *gtt;
> +
> +	vgdev = virtio_gpu_get_vgdev(bdev);
> +	gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
> +	if (gtt == NULL)
> +		return NULL;
> +	gtt->ttm.ttm.func = &virtio_gpu_backend_func;
> +	gtt->vgdev = vgdev;
> +	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags,
> +			    dummy_read_page)) {
> +		kfree(gtt);
> +		return NULL;
> +	}
> +	return &gtt->ttm.ttm;
> +}
> +
> +static void virtio_gpu_move_null(struct ttm_buffer_object *bo,
> +				 struct ttm_mem_reg *new_mem)
> +{
> +	struct ttm_mem_reg *old_mem = &bo->mem;
> +
> +	BUG_ON(old_mem->mm_node != NULL);
> +	*old_mem = *new_mem;
> +	new_mem->mm_node = NULL;
> +}
> +
> +static int virtio_gpu_bo_move(struct ttm_buffer_object *bo,
> +			      bool evict, bool interruptible,
> +			      bool no_wait_gpu,
> +			      struct ttm_mem_reg *new_mem)
> +{
> +	virtio_gpu_move_null(bo, new_mem);
> +	return 0;
> +}
> +
> +static void virtio_gpu_bo_move_notify(struct ttm_buffer_object *tbo,
> +				      struct ttm_mem_reg *new_mem)
> +{
> +	struct virtio_gpu_object *bo;
> +	struct virtio_gpu_device *vgdev;
> +
> +	bo = container_of(tbo, struct virtio_gpu_object, tbo);
> +	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
> +
> +	if (!new_mem || (new_mem->placement & TTM_PL_FLAG_SYSTEM)) {
> +		if (bo->hw_res_handle)
> +			virtio_gpu_cmd_resource_inval_backing(vgdev,
> +							   bo->hw_res_handle);
> +
> +	} else if (new_mem->placement & TTM_PL_FLAG_TT) {
> +		if (bo->hw_res_handle) {
> +			virtio_gpu_object_attach(vgdev, bo, bo->hw_res_handle,
> +					      NULL);
> +		}
> +	}
> +}
> +
> +static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo)
> +{
> +	struct virtio_gpu_object *bo;
> +	struct virtio_gpu_device *vgdev;
> +
> +	bo = container_of(tbo, struct virtio_gpu_object, tbo);
> +	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
> +
> +	if (bo->pages)
> +		virtio_gpu_object_free_sg_table(bo);
> +}
> +
> +static struct ttm_bo_driver virtio_gpu_bo_driver = {
> +	.ttm_tt_create = &virtio_gpu_ttm_tt_create,
> +	.ttm_tt_populate = &virtio_gpu_ttm_tt_populate,
> +	.ttm_tt_unpopulate = &virtio_gpu_ttm_tt_unpopulate,
> +	.invalidate_caches = &virtio_gpu_invalidate_caches,
> +	.init_mem_type = &virtio_gpu_init_mem_type,
> +	.evict_flags = &virtio_gpu_evict_flags,
> +	.move = &virtio_gpu_bo_move,
> +	.verify_access = &virtio_gpu_verify_access,
> +	.io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
> +	.io_mem_free = &virtio_gpu_ttm_io_mem_free,
> +	.move_notify = &virtio_gpu_bo_move_notify,
> +	.swap_notify = &virtio_gpu_bo_swap_notify,
> +};
> +
> +int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
> +{
> +	int r;
> +
> +	r = virtio_gpu_ttm_global_init(vgdev);
> +	if (r)
> +		return r;
> +	/* No other users of the address space, so set it to 0 */
> +	r = ttm_bo_device_init(&vgdev->mman.bdev,
> +			       vgdev->mman.bo_global_ref.ref.object,
> +			       &virtio_gpu_bo_driver,
> +			       vgdev->ddev->anon_inode->i_mapping,
> +			       DRM_FILE_PAGE_OFFSET, 0);
> +	if (r) {
> +		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
> +		return r;
> +	}
> +
> +	r = ttm_bo_init_mm(&vgdev->mman.bdev, TTM_PL_TT, 0);
> +	if (r) {
> +		DRM_ERROR("Failed initializing GTT heap.\n");
> +		return r;
> +	}
> +	return 0;
> +}
> +
> +void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev)
> +{
> +	ttm_bo_device_release(&vgdev->mman.bdev);
> +	virtio_gpu_ttm_global_fini(vgdev);
> +	DRM_INFO("virtio_gpu: ttm finalized\n");
> +}
> diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
> new file mode 100644
> index 0000000..a98cda8
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
> @@ -0,0 +1,540 @@
> +#include <drm/drmP.h>
> +#include "virtgpu_drv.h"
> +#include <linux/virtio.h>
> +#include <linux/virtio_config.h>
> +#include <linux/virtio_ring.h>
> +
> +
> +int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
> +{
> +	int handle;
> +
> +	idr_preload(GFP_KERNEL);
> +	spin_lock(&vgdev->resource_idr_lock);
> +	handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
> +	spin_unlock(&vgdev->resource_idr_lock);
> +	idr_preload_end();
> +	if (handle < 0)
> +		return handle;
> +	*resid = handle;
> +	return 0;
> +}
> +
> +void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
> +{
> +	spin_lock(&vgdev->resource_idr_lock);
> +	idr_remove(&vgdev->resource_idr, id);
> +	spin_unlock(&vgdev->resource_idr_lock);
> +}
> +
> +void virtio_gpu_ctrl_ack(struct virtqueue *vq)
> +{
> +	struct drm_device *dev = vq->vdev->priv;
> +	struct virtio_gpu_device *vgdev = dev->dev_private;
> +
> +	schedule_work(&vgdev->ctrlq.dequeue_work);
> +}
> +
> +void virtio_gpu_cursor_ack(struct virtqueue *vq)
> +{
> +	struct drm_device *dev = vq->vdev->priv;
> +	struct virtio_gpu_device *vgdev = dev->dev_private;
> +
> +	schedule_work(&vgdev->cursorq.dequeue_work);
> +}
> +
> +static struct virtio_gpu_vbuffer*
> +virtio_gpu_allocate_vbuf(struct virtio_gpu_device *vgdev,
> +			 int size, int resp_size,
> +			 virtio_gpu_resp_cb resp_cb)
> +{
> +	struct virtio_gpu_vbuffer *vbuf;
> +
> +	vbuf = kzalloc(sizeof(*vbuf) + size + resp_size, GFP_KERNEL);
> +	if (!vbuf)
> +		return ERR_PTR(-ENOMEM);
> +
> +	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
> +	vbuf->size = size;
> +
> +	vbuf->resp_cb = resp_cb;
> +	if (resp_size)
> +		vbuf->resp_buf = (void *)vbuf->buf + size;
> +	else
> +		vbuf->resp_buf = NULL;
> +	vbuf->resp_size = resp_size;
> +
> +	return vbuf;
> +}
> +
> +static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
> +				  struct virtio_gpu_vbuffer **vbuffer_p,
> +				  int size)
> +{
> +	struct virtio_gpu_vbuffer *vbuf;
> +
> +	vbuf = virtio_gpu_allocate_vbuf(vgdev, size,
> +				     sizeof(struct virtio_gpu_ctrl_hdr), NULL);
> +	if (IS_ERR(vbuf)) {
> +		*vbuffer_p = NULL;
> +		return ERR_CAST(vbuf);
> +	}
> +	*vbuffer_p = vbuf;
> +	return vbuf->buf;
> +}
> +
> +static struct virtio_gpu_update_cursor*
> +virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
> +			struct virtio_gpu_vbuffer **vbuffer_p)
> +{
> +	struct virtio_gpu_vbuffer *vbuf;
> +
> +	vbuf = virtio_gpu_allocate_vbuf
> +		(vgdev, sizeof(struct virtio_gpu_update_cursor), 0, NULL);
> +	if (IS_ERR(vbuf)) {
> +		*vbuffer_p = NULL;
> +		return ERR_CAST(vbuf);
> +	}
> +	*vbuffer_p = vbuf;
> +	return (struct virtio_gpu_update_cursor *)vbuf->buf;
> +}
> +
> +static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
> +				       virtio_gpu_resp_cb cb,
> +				       struct virtio_gpu_vbuffer **vbuffer_p,
> +				       int cmd_size, int resp_size)
> +{
> +	struct virtio_gpu_vbuffer *vbuf;
> +
> +	vbuf = virtio_gpu_allocate_vbuf(vgdev, cmd_size, resp_size, cb);
> +	if (IS_ERR(vbuf)) {
> +		*vbuffer_p = NULL;
> +		return ERR_CAST(vbuf);
> +	}
> +	*vbuffer_p = vbuf;
> +	return (struct virtio_gpu_command *)vbuf->buf;
> +}
> +
> +static void free_vbuf(struct virtio_gpu_device *vgdev,
> +		      struct virtio_gpu_vbuffer *vbuf)
> +{
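> +	/* data_buf, when set (e.g. the attach_backing entry array), is
> +	 * owned by the vbuf and released together with it */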
> +	kfree(vbuf->data_buf);
> +	kfree(vbuf);
> +}
> +
> +static int reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
> +{
> +	struct virtio_gpu_vbuffer *vbuf;
> +	unsigned int len;
> +	int freed = 0;
> +
> +	while ((vbuf = virtqueue_get_buf(vq, &len))) {
> +		list_add_tail(&vbuf->destroy_list, reclaim_list);
> +		freed++;
> +	}
> +	return freed;
> +}
> +
> +void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
> +{
> +	struct virtio_gpu_device *vgdev =
> +		container_of(work, struct virtio_gpu_device,
> +			     ctrlq.dequeue_work);
> +	int ret;
> +	struct list_head reclaim_list;
> +	struct virtio_gpu_vbuffer *entry, *tmp;
> +	struct virtio_gpu_ctrl_hdr *resp;
> +	u64 fence_id = 0;
> +
> +	INIT_LIST_HEAD(&reclaim_list);
> +	spin_lock(&vgdev->ctrlq.qlock);
> +	do {
> +		virtqueue_disable_cb(vgdev->ctrlq.vq);
> +		ret = reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
> +		if (ret == 0)
> +			DRM_DEBUG("cleaned 0 buffers, weird\n");
> +
> +	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
> +	spin_unlock(&vgdev->ctrlq.qlock);
> +
> +	list_for_each_entry_safe(entry, tmp, &reclaim_list, destroy_list) {
> +		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
> +		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
> +			DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
> +		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
> +			u64 f = le64_to_cpu(resp->fence_id);
> +
> +			if (fence_id > f) {
> +				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
> +					  __func__, fence_id, f);
> +			} else {
> +				fence_id = f;
> +			}
> +		}
> +		if (entry->resp_cb)
> +			entry->resp_cb(vgdev, entry);
> +
> +		list_del(&entry->destroy_list);
> +		free_vbuf(vgdev, entry);
> +	}
> +	wake_up(&vgdev->ctrlq.ack_queue);
> +
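> +	/* fences complete in order on the ctrl queue, so forwarding the
> +	 * highest id seen is enough: event_process() signals every fence
> +	 * up to that seq */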
> +	if (fence_id)
> +		virtio_gpu_fence_event_process(vgdev, fence_id);
> +}
> +
> +void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
> +{
> +	struct virtio_gpu_device *vgdev =
> +		container_of(work, struct virtio_gpu_device,
> +			     cursorq.dequeue_work);
> +	struct virtqueue *vq = vgdev->cursorq.vq;
> +	struct list_head reclaim_list;
> +	struct virtio_gpu_vbuffer *entry, *tmp;
> +	unsigned int len;
> +	int ret;
> +
> +	INIT_LIST_HEAD(&reclaim_list);
> +	spin_lock(&vgdev->cursorq.qlock);
> +	do {
> +		virtqueue_disable_cb(vgdev->cursorq.vq);
> +		ret = reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
> +		if (ret == 0)
> +			DRM_DEBUG("cleaned 0 buffers, weird\n");
> +		while (virtqueue_get_buf(vq, &len))
> +			/* nothing */;
> +	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
> +	spin_unlock(&vgdev->cursorq.qlock);
> +
> +	list_for_each_entry_safe(entry, tmp, &reclaim_list, destroy_list) {
> +		list_del(&entry->destroy_list);
> +		free_vbuf(vgdev, entry);
> +	}
> +	wake_up(&vgdev->cursorq.ack_queue);
> +}
> +
> +static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
> +					struct virtio_gpu_vbuffer *vbuf)
> +{
> +	struct virtqueue *vq = vgdev->ctrlq.vq;
> +	struct scatterlist *sgs[3], vcmd, vout, vresp;
> +	int outcnt = 0, incnt = 0;
> +	int ret;
> +
> +	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
> +	sgs[outcnt+incnt] = &vcmd;
> +	outcnt++;
> +
> +	if (vbuf->data_buf) {
> +		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
> +		sgs[outcnt+incnt] = &vout;
> +		outcnt++;
> +	}
> +
> +	if (vbuf->resp_buf) {
> +		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
> +		sgs[outcnt+incnt] = &vresp;
> +		incnt++;
> +	}
> +
> +	spin_lock(&vgdev->ctrlq.qlock);
> +retry:
> +	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
> +	if (ret == -ENOSPC) {
> +		spin_unlock(&vgdev->ctrlq.qlock);
> +		wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
> +		spin_lock(&vgdev->ctrlq.qlock);
> +		goto retry;
> +	} else {
> +		virtqueue_kick(vq);
> +	}
> +	spin_unlock(&vgdev->ctrlq.qlock);
> +
> +	if (!ret)
> +		ret = vq->num_free;
> +	return ret;
> +}
> +
> +static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
> +				   struct virtio_gpu_vbuffer *vbuf)
> +{
> +	struct virtqueue *vq = vgdev->cursorq.vq;
> +	struct scatterlist *sgs[1], ccmd;
> +	int ret;
> +	int outcnt;
> +
> +	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
> +	sgs[0] = &ccmd;
> +	outcnt = 1;
> +
> +	spin_lock(&vgdev->cursorq.qlock);
> +retry:
> +	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
> +	if (ret == -ENOSPC) {
> +		spin_unlock(&vgdev->cursorq.qlock);
> +		wait_event(vgdev->cursorq.ack_queue, vq->num_free);
> +		spin_lock(&vgdev->cursorq.qlock);
> +		goto retry;
> +	} else {
> +		virtqueue_kick(vq);
> +	}
> +
> +	spin_unlock(&vgdev->cursorq.qlock);
> +
> +	if (!ret)
> +		ret = vq->num_free;
> +	return ret;
> +}
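
The -ENOSPC retry dance is now duplicated between the ctrl and cursor
paths.  Folding it into one helper would keep the two in sync, along
these lines (helper name made up):

	static int virtio_gpu_queue_buffer(struct virtio_gpu_device *vgdev,
					   struct virtio_gpu_queue *q,
					   struct scatterlist **sgs,
					   int outcnt, int incnt,
					   struct virtio_gpu_vbuffer *vbuf)
	{
		int ret;

		spin_lock(&q->qlock);
	retry:
		ret = virtqueue_add_sgs(q->vq, sgs, outcnt, incnt, vbuf,
					GFP_ATOMIC);
		if (ret == -ENOSPC) {
			spin_unlock(&q->qlock);
			wait_event(q->ack_queue, q->vq->num_free);
			spin_lock(&q->qlock);
			goto retry;
		}
		virtqueue_kick(q->vq);
		spin_unlock(&q->qlock);

		return ret ? ret : q->vq->num_free;
	}
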
> +
> +/* Create gem objects only for userspace-visible, long-lived objects;
> +   could the short-lived queue buffers just use dma_alloc'ed pages? */
> +
> +/* create a basic resource */
> +int virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
> +				   uint32_t resource_id,
> +				   uint32_t format,
> +				   uint32_t width,
> +				   uint32_t height)
> +{
> +	struct virtio_gpu_resource_create_2d *cmd_p;
> +	struct virtio_gpu_vbuffer *vbuf;
> +
> +	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
> +	memset(cmd_p, 0, sizeof(*cmd_p));
> +
> +	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
> +	cmd_p->resource_id = cpu_to_le32(resource_id);
> +	cmd_p->format = cpu_to_le32(format);
> +	cmd_p->width = cpu_to_le32(width);
> +	cmd_p->height = cpu_to_le32(height);
> +
> +	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
> +
> +	return 0;
> +}
> +
> +int virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
> +				  uint32_t resource_id)
> +{
> +	struct virtio_gpu_resource_unref *cmd_p;
> +	struct virtio_gpu_vbuffer *vbuf;
> +
> +	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
> +	memset(cmd_p, 0, sizeof(*cmd_p));
> +
> +	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
> +	cmd_p->resource_id = cpu_to_le32(resource_id);
> +
> +	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
> +	return 0;
> +}
> +
> +int virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
> +					  uint32_t resource_id)
> +{
> +	struct virtio_gpu_resource_detach_backing *cmd_p;
> +	struct virtio_gpu_vbuffer *vbuf;
> +
> +	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
> +	memset(cmd_p, 0, sizeof(*cmd_p));
> +
> +	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
> +	cmd_p->resource_id = cpu_to_le32(resource_id);
> +
> +	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
> +
> +	return 0;
> +}
> +
> +int virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
> +			       uint32_t scanout_id, uint32_t resource_id,
> +			       uint32_t width, uint32_t height,
> +			       uint32_t x, uint32_t y)
> +{
> +	struct virtio_gpu_set_scanout *cmd_p;
> +	struct virtio_gpu_vbuffer *vbuf;
> +
> +	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
> +	memset(cmd_p, 0, sizeof(*cmd_p));
> +
> +	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
> +	cmd_p->resource_id = cpu_to_le32(resource_id);
> +	cmd_p->scanout_id = cpu_to_le32(scanout_id);
> +	cmd_p->r.width = cpu_to_le32(width);
> +	cmd_p->r.height = cpu_to_le32(height);
> +	cmd_p->r.x = cpu_to_le32(x);
> +	cmd_p->r.y = cpu_to_le32(y);
> +
> +	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
> +	return 0;
> +}
> +
> +int virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
> +				  uint32_t resource_id,
> +				  uint32_t x, uint32_t y,
> +				  uint32_t width, uint32_t height)
> +{
> +	struct virtio_gpu_resource_flush *cmd_p;
> +	struct virtio_gpu_vbuffer *vbuf;
> +
> +	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
> +	memset(cmd_p, 0, sizeof(*cmd_p));
> +
> +	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
> +	cmd_p->resource_id = cpu_to_le32(resource_id);
> +	cmd_p->r.width = cpu_to_le32(width);
> +	cmd_p->r.height = cpu_to_le32(height);
> +	cmd_p->r.x = cpu_to_le32(x);
> +	cmd_p->r.y = cpu_to_le32(y);
> +
> +	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
> +
> +	return 0;
> +}
> +
> +int virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
> +				       uint32_t resource_id, uint64_t offset,
> +				       __le32 width, __le32 height,
> +				       __le32 x, __le32 y,
> +				       struct virtio_gpu_fence **fence)
> +{
> +	struct virtio_gpu_transfer_to_host_2d *cmd_p;
> +	struct virtio_gpu_vbuffer *vbuf;
> +
> +	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
> +	memset(cmd_p, 0, sizeof(*cmd_p));
> +
> +	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
> +	cmd_p->resource_id = cpu_to_le32(resource_id);
> +	cmd_p->offset = cpu_to_le64(offset);
> +	cmd_p->r.width = width;
> +	cmd_p->r.height = height;
> +	cmd_p->r.x = x;
> +	cmd_p->r.y = y;
> +
> +	if (fence)
> +		virtio_gpu_fence_emit(vgdev, &cmd_p->hdr, fence);
> +	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
> +
> +	return 0;
> +}
> +
> +static int
> +virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
> +				       uint32_t resource_id,
> +				       struct virtio_gpu_mem_entry *ents,
> +				       uint32_t nents,
> +				       struct virtio_gpu_fence **fence)
> +{
> +	struct virtio_gpu_resource_attach_backing *cmd_p;
> +	struct virtio_gpu_vbuffer *vbuf;
> +
> +	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
> +	memset(cmd_p, 0, sizeof(*cmd_p));
> +
> +	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
> +	cmd_p->resource_id = cpu_to_le32(resource_id);
> +	cmd_p->nr_entries = cpu_to_le32(nents);
> +
> +	vbuf->data_buf = ents;
> +	vbuf->data_size = sizeof(*ents) * nents;
> +
> +	if (fence)
> +		virtio_gpu_fence_emit(vgdev, &cmd_p->hdr, fence);
> +	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
> +
> +	return 0;
> +}
> +
> +static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
> +					       struct virtio_gpu_vbuffer *vbuf)
> +{
> +	struct virtio_gpu_resp_display_info *resp =
> +		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
> +	int i;
> +
> +	spin_lock(&vgdev->display_info_lock);
> +	for (i = 0; i < vgdev->num_scanouts; i++) {
> +		vgdev->outputs[i].info = resp->pmodes[i];
> +		if (resp->pmodes[i].enabled) {
> +			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
> +				  le32_to_cpu(resp->pmodes[i].r.width),
> +				  le32_to_cpu(resp->pmodes[i].r.height),
> +				  le32_to_cpu(resp->pmodes[i].r.x),
> +				  le32_to_cpu(resp->pmodes[i].r.y));
> +		} else {
> +			DRM_DEBUG("output %d: disabled", i);
> +		}
> +	}
> +
> +	spin_unlock(&vgdev->display_info_lock);
> +	wake_up(&vgdev->resp_wq);
> +
> +	if (!drm_helper_hpd_irq_event(vgdev->ddev))
> +		drm_kms_helper_hotplug_event(vgdev->ddev);
> +}
> +
> +int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
> +{
> +	struct virtio_gpu_ctrl_hdr *cmd_p;
> +	struct virtio_gpu_vbuffer *vbuf;
> +
> +	cmd_p = virtio_gpu_alloc_cmd_resp
> +		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
> +		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info));
> +	memset(cmd_p, 0, sizeof(*cmd_p));
> +
> +	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
> +	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
> +	return 0;
> +}
> +
> +int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
> +			     struct virtio_gpu_object *obj,
> +			     uint32_t resource_id,
> +			     struct virtio_gpu_fence **fence)
> +{
> +	struct virtio_gpu_mem_entry *ents;
> +	struct scatterlist *sg;
> +	int si;
> +
> +	if (!obj->pages) {
> +		int ret;
> +
> +		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
> +		if (ret)
> +			return ret;
> +	}
> +
> +	/* gets freed when the ring has consumed it */
> +	ents = kmalloc_array(obj->pages->nents,
> +			     sizeof(struct virtio_gpu_mem_entry),
> +			     GFP_KERNEL);
> +	if (!ents) {
> +		DRM_ERROR("failed to allocate ent list\n");
> +		return -ENOMEM;
> +	}
> +
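> +	/* the host consumes guest-physical addresses directly, hence
> +	 * sg_phys() with no dma_map_sg() cycle here */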
> +	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
> +		ents[si].addr = cpu_to_le64(sg_phys(sg));
> +		ents[si].length = cpu_to_le32(sg->length);
> +		ents[si].padding = 0;
> +	}
> +
> +	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
> +					       ents, obj->pages->nents,
> +					       fence);
> +	obj->hw_res_handle = resource_id;
> +	return 0;
> +}
> +
> +void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
> +			    struct virtio_gpu_output *output)
> +{
> +	struct virtio_gpu_vbuffer *vbuf;
> +	struct virtio_gpu_update_cursor *cur_p;
> +
> +	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
> +	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
> +	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
> +	virtio_gpu_queue_cursor(vgdev, vbuf);
> +}
> diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
> index e894eb2..a3167fa 100644
> --- a/drivers/virtio/virtio_pci_common.c
> +++ b/drivers/virtio/virtio_pci_common.c
> @@ -510,7 +510,7 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
>  		goto err_enable_device;
>  
>  	rc = pci_request_regions(pci_dev, "virtio-pci");
> -	if (rc)
> +	if (rc && ((pci_dev->class >> 8) != PCI_CLASS_DISPLAY_VGA))
>  		goto err_request_regions;
>  
>  	if (force_legacy) {
> diff --git a/include/drm/drmP.h b/include/drm/drmP.h
> index e928625..a1067c4 100644
> --- a/include/drm/drmP.h
> +++ b/include/drm/drmP.h
> @@ -799,6 +799,7 @@ struct drm_device {
>  #endif
>  
>  	struct platform_device *platformdev; /**< Platform device struture */
> +	struct virtio_device *virtdev;
>  
>  	struct drm_sg_mem *sg;	/**< Scatter gather memory */
>  	unsigned int num_crtcs;                  /**< Number of CRTCs on this device */
> diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
> index 68ceb97..9707e5d 100644
> --- a/include/uapi/linux/Kbuild
> +++ b/include/uapi/linux/Kbuild
> @@ -429,6 +429,7 @@ header-y += virtio_balloon.h
>  header-y += virtio_blk.h
>  header-y += virtio_config.h
>  header-y += virtio_console.h
> +header-y += virtio_gpu.h
>  header-y += virtio_ids.h
>  header-y += virtio_net.h
>  header-y += virtio_pci.h
> diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
> new file mode 100644
> index 0000000..a1bda52
> --- /dev/null
> +++ b/include/uapi/linux/virtio_gpu.h
> @@ -0,0 +1,203 @@
> +/*
> + * Virtio GPU Device
> + *
> + * Copyright Red Hat, Inc. 2013-2014
> + *
> + * Authors:
> + *     Dave Airlie <airlied@redhat.com>
> + *     Gerd Hoffmann <kraxel@redhat.com>
> + *
> + * This header is BSD licensed so anyone can use the definitions
> + * to implement compatible drivers/servers:
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions
> + * are met:
> + * 1. Redistributions of source code must retain the above copyright
> + *    notice, this list of conditions and the following disclaimer.
> + * 2. Redistributions in binary form must reproduce the above copyright
> + *    notice, this list of conditions and the following disclaimer in the
> + *    documentation and/or other materials provided with the distribution.
> + * 3. Neither the name of IBM nor the names of its contributors
> + *    may be used to endorse or promote products derived from this software
> + *    without specific prior written permission.
> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
> + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR
> + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
> + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
> + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
> + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
> + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
> + * SUCH DAMAGE.
> + */
> +
> +#ifndef VIRTIO_GPU_HW_H
> +#define VIRTIO_GPU_HW_H
> +
> +enum virtio_gpu_ctrl_type {
> +	VIRTIO_GPU_UNDEFINED = 0,
> +
> +	/* 2d commands */
> +	VIRTIO_GPU_CMD_GET_DISPLAY_INFO = 0x0100,
> +	VIRTIO_GPU_CMD_RESOURCE_CREATE_2D,
> +	VIRTIO_GPU_CMD_RESOURCE_UNREF,
> +	VIRTIO_GPU_CMD_SET_SCANOUT,
> +	VIRTIO_GPU_CMD_RESOURCE_FLUSH,
> +	VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D,
> +	VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING,
> +	VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING,
> +
> +	/* cursor commands */
> +	VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300,
> +	VIRTIO_GPU_CMD_MOVE_CURSOR,
> +
> +	/* success responses */
> +	VIRTIO_GPU_RESP_OK_NODATA = 0x1100,
> +	VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
> +
> +	/* error responses */
> +	VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
> +	VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY,
> +	VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID,
> +	VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID,
> +	VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID,
> +	VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER,
> +};
> +
> +#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
> +
> +struct virtio_gpu_ctrl_hdr {
> +	__le32 type;
> +	__le32 flags;
> +	__le64 fence_id;
> +	__le32 ctx_id;
> +	__le32 padding;
> +};
> +
> +/* data passed in the cursor vq */
> +
> +struct virtio_gpu_cursor_pos {
> +	__le32 scanout_id;
> +	__le32 x, y;
> +	__le32 padding;
> +};
> +
> +/* VIRTIO_GPU_CMD_UPDATE_CURSOR, VIRTIO_GPU_CMD_MOVE_CURSOR */
> +struct virtio_gpu_update_cursor {
> +	struct virtio_gpu_ctrl_hdr hdr;
> +	struct virtio_gpu_cursor_pos pos;  /* update & move */
> +	__le32 resource_id;           /* update only */
> +	__le32 hot_x;                 /* update only */
> +	__le32 hot_y;                 /* update only */
> +	__le32 padding;
> +};
> +
> +/* data passed in the control vq, 2d related */
> +
> +struct virtio_gpu_rect {
> +	__le32 x, y;
> +	__le32 width;
> +	__le32 height;
> +};
> +
> +/* VIRTIO_GPU_CMD_RESOURCE_UNREF */
> +struct virtio_gpu_resource_unref {
> +	struct virtio_gpu_ctrl_hdr hdr;
> +	__le32 resource_id;
> +	__le32 padding;
> +};
> +
> +/* VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: create a 2d resource with a format */
> +struct virtio_gpu_resource_create_2d {
> +	struct virtio_gpu_ctrl_hdr hdr;
> +	__le32 resource_id;
> +	__le32 format;
> +	__le32 width;
> +	__le32 height;
> +};
> +
> +/* VIRTIO_GPU_CMD_SET_SCANOUT */
> +struct virtio_gpu_set_scanout {
> +	struct virtio_gpu_ctrl_hdr hdr;
> +	struct virtio_gpu_rect r;
> +	__le32 scanout_id;
> +	__le32 resource_id;
> +};
> +
> +/* VIRTIO_GPU_CMD_RESOURCE_FLUSH */
> +struct virtio_gpu_resource_flush {
> +	struct virtio_gpu_ctrl_hdr hdr;
> +	struct virtio_gpu_rect r;
> +	__le32 resource_id;
> +	__le32 padding;
> +};
> +
> +/* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: simple transfer to_host */
> +struct virtio_gpu_transfer_to_host_2d {
> +	struct virtio_gpu_ctrl_hdr hdr;
> +	struct virtio_gpu_rect r;
> +	__le64 offset;
> +	__le32 resource_id;
> +	__le32 padding;
> +};
> +
> +struct virtio_gpu_mem_entry {
> +	__le64 addr;
> +	__le32 length;
> +	__le32 padding;
> +};
> +
> +/* VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING */
> +struct virtio_gpu_resource_attach_backing {
> +	struct virtio_gpu_ctrl_hdr hdr;
> +	__le32 resource_id;
> +	__le32 nr_entries;
> +};
> +
> +/* VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING */
> +struct virtio_gpu_resource_detach_backing {
> +	struct virtio_gpu_ctrl_hdr hdr;
> +	__le32 resource_id;
> +	__le32 padding;
> +};
> +
> +/* VIRTIO_GPU_RESP_OK_DISPLAY_INFO */
> +#define VIRTIO_GPU_MAX_SCANOUTS 16
> +struct virtio_gpu_resp_display_info {
> +	struct virtio_gpu_ctrl_hdr hdr;
> +	struct virtio_gpu_display_one {
> +		struct virtio_gpu_rect r;
> +		__le32 enabled;
> +		__le32 flags;
> +	} pmodes[VIRTIO_GPU_MAX_SCANOUTS];
> +};
> +
> +#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)
> +
> +struct virtio_gpu_config {
> +	__u32 events_read;
> +	__u32 events_clear;
> +	__u32 num_scanouts;
> +	__u32 reserved;
> +};
> +
> +/* simple formats for fbcon/X use */
> +enum virtio_gpu_formats {
> +	VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM  = 1,
> +	VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM  = 2,
> +	VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM  = 3,
> +	VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM  = 4,
> +
> +	VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM  = 67,
> +	VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM  = 68,
> +
> +	VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM  = 121,
> +	VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM  = 134,
> +};
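
The gaps in the numbering above look deliberate; the values appear to
match Gallium's PIPE_FORMAT_* enum, presumably so virgl can pass them
through unchanged later on.  Worth recording in the header, e.g.:

	/* values mirror Gallium's PIPE_FORMAT_* enum so they can be
	 * passed through to virgl unmodified */
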
> +
> +#endif
> diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
> index 284fc3a..14d77f7 100644
> --- a/include/uapi/linux/virtio_ids.h
> +++ b/include/uapi/linux/virtio_ids.h
> @@ -39,5 +39,5 @@
>  #define VIRTIO_ID_9P		9 /* 9p virtio console */
>  #define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */
>  #define VIRTIO_ID_CAIF	       12 /* Virtio caif */
> -
> +#define VIRTIO_ID_GPU          16
>  #endif /* _LINUX_VIRTIO_IDS_H */
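
Nit: this hunk also drops the blank line before the #endif, which looks
accidental.  Keeping it makes the change a pure addition:

 #define VIRTIO_ID_CAIF	       12 /* Virtio caif */
+#define VIRTIO_ID_GPU          16 /* virtio GPU */
 
 #endif /* _LINUX_VIRTIO_IDS_H */
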
> -- 
> 1.8.3.1
> 
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/dri-devel
Michael S. Tsirkin March 24, 2015, 5:04 p.m. UTC | #3
On Tue, Mar 24, 2015 at 05:07:18PM +0100, Gerd Hoffmann wrote:
> From: Dave Airlie <airlied@gmail.com>
> 
> This patch adds a kms driver for the virtio gpu.  The xorg modesetting
> driver can handle the device just fine, the framebuffer for fbcon is
> there too.
> 
> Qemu patches for the host side are under review currently.
> 
> The pci version of the device comes in two variants: with and without
> vga compatibility.  The former has a extra memory bar for the vga
> framebuffer, the later is a pure virtio device.  The only concern for
> this driver is that in the virtio-vga case we have to kick out the
> firmware framebuffer.
> 
> Initial revision has only 2d support, 3d (virgl) support requires
> some more work on the qemu side and will be added later.
> 
> Signed-off-by: Dave Airlie <airlied@redhat.com>
> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>

I did a quick scan of the code; below are mostly cosmetic
issues.

> ---
>  drivers/gpu/drm/Kconfig                  |   2 +
>  drivers/gpu/drm/Makefile                 |   1 +
>  drivers/gpu/drm/virtio/Kconfig           |  11 +
>  drivers/gpu/drm/virtio/Makefile          |   9 +
>  drivers/gpu/drm/virtio/virtgpu_debugfs.c |  64 ++++
>  drivers/gpu/drm/virtio/virtgpu_display.c | 527 ++++++++++++++++++++++++++++++
>  drivers/gpu/drm/virtio/virtgpu_drm_bus.c |  68 ++++
>  drivers/gpu/drm/virtio/virtgpu_drv.c     | 132 ++++++++
>  drivers/gpu/drm/virtio/virtgpu_drv.h     | 326 +++++++++++++++++++
>  drivers/gpu/drm/virtio/virtgpu_fb.c      | 415 ++++++++++++++++++++++++
>  drivers/gpu/drm/virtio/virtgpu_fence.c   |  95 ++++++
>  drivers/gpu/drm/virtio/virtgpu_gem.c     | 120 +++++++
>  drivers/gpu/drm/virtio/virtgpu_kms.c     | 125 +++++++
>  drivers/gpu/drm/virtio/virtgpu_object.c  | 174 ++++++++++
>  drivers/gpu/drm/virtio/virtgpu_ttm.c     | 451 ++++++++++++++++++++++++++
>  drivers/gpu/drm/virtio/virtgpu_vq.c      | 540 +++++++++++++++++++++++++++++++
>  drivers/virtio/virtio_pci_common.c       |   2 +-
>  include/drm/drmP.h                       |   1 +
>  include/uapi/linux/Kbuild                |   1 +
>  include/uapi/linux/virtio_gpu.h          | 203 ++++++++++++
>  include/uapi/linux/virtio_ids.h          |   2 +-
>  21 files changed, 3267 insertions(+), 2 deletions(-)
>  create mode 100644 drivers/gpu/drm/virtio/Kconfig
>  create mode 100644 drivers/gpu/drm/virtio/Makefile
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_debugfs.c
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_display.c
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_drm_bus.c
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_drv.c
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_drv.h
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_fb.c
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_fence.c
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_gem.c
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_kms.c
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_object.c
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_ttm.c
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_vq.c
>  create mode 100644 include/uapi/linux/virtio_gpu.h
> 
> diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
> index 151a050..f2388ea 100644
> --- a/drivers/gpu/drm/Kconfig
> +++ b/drivers/gpu/drm/Kconfig
> @@ -197,6 +197,8 @@ source "drivers/gpu/drm/qxl/Kconfig"
>  
>  source "drivers/gpu/drm/bochs/Kconfig"
>  
> +source "drivers/gpu/drm/virtio/Kconfig"
> +
>  source "drivers/gpu/drm/msm/Kconfig"
>  
>  source "drivers/gpu/drm/tegra/Kconfig"
> diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
> index 2c239b9..083d443 100644
> --- a/drivers/gpu/drm/Makefile
> +++ b/drivers/gpu/drm/Makefile
> @@ -62,6 +62,7 @@ obj-$(CONFIG_DRM_OMAP)	+= omapdrm/
>  obj-$(CONFIG_DRM_TILCDC)	+= tilcdc/
>  obj-$(CONFIG_DRM_QXL) += qxl/
>  obj-$(CONFIG_DRM_BOCHS) += bochs/
> +obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio/
>  obj-$(CONFIG_DRM_MSM) += msm/
>  obj-$(CONFIG_DRM_TEGRA) += tegra/
>  obj-$(CONFIG_DRM_STI) += sti/
> diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
> new file mode 100644
> index 0000000..55868e2
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/Kconfig
> @@ -0,0 +1,11 @@
> +config DRM_VIRTIO_GPU
> +	tristate "QEMU Virtio GPU"

I think it should be "Virtio GPU driver".

> +	depends on DRM && VIRTIO
> +	select FB_SYS_FILLRECT
> +	select FB_SYS_COPYAREA
> +	select FB_SYS_IMAGEBLIT
> +        select DRM_KMS_HELPER
> +        select DRM_KMS_FB_HELPER
> +        select DRM_TTM
> +	help
> +	   QEMU based virtio GPU.

How about:
	This is the virtual GPU driver for virtio.  It can be used with
	lguest or QEMU based VMMs (like KVM or Xen).  Say Y or M.


> diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile
> new file mode 100644
> index 0000000..57d59ee
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/Makefile
> @@ -0,0 +1,9 @@
> +#
> +# Makefile for the drm device driver.  This driver provides support for the
> +# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
> +
> +ccflags-y := -Iinclude/drm
> +
> +virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_drm_bus.o virtgpu_gem.o virtgpu_fb.o virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o
> +
> +obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio-gpu.o

Are ccflags hacks and long Makefile lines the norm for drm?
If yes, this is fine.
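
If not, the object list could at least be wrapped, e.g.:

	virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_drm_bus.o \
		virtgpu_gem.o virtgpu_fb.o virtgpu_display.o \
		virtgpu_vq.o virtgpu_ttm.o virtgpu_fence.o \
		virtgpu_object.o virtgpu_debugfs.o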

> diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
> new file mode 100644
> index 0000000..dbc497d
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
> @@ -0,0 +1,64 @@
> +/*
> + * Copyright (C) 2009 Red Hat
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining
> + * a copy of this software and associated documentation files (the
> + * "Software"), to deal in the Software without restriction, including
> + * without limitation the rights to use, copy, modify, merge, publish,
> + * distribute, sublicense, and/or sell copies of the Software, and to
> + * permit persons to whom the Software is furnished to do so, subject to
> + * the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the
> + * next paragraph) shall be included in all copies or substantial
> + * portions of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
> + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
> + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
> + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
> + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
> + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
> + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
> + *
> + */
> +
> +#include <linux/debugfs.h>
> +
> +#include "drmP.h"
> +#include "virtgpu_drv.h"
> +
> +static int
> +virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
> +{
> +	struct drm_info_node *node = (struct drm_info_node *) m->private;
> +	struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
> +
> +	seq_printf(m, "fence %ld %lld\n",
> +		   atomic64_read(&vgdev->fence_drv.last_seq),
> +		   vgdev->fence_drv.sync_seq);
> +	return 0;
> +}
> +
> +static struct drm_info_list virtio_gpu_debugfs_list[] = {
> +	{ "irq_fence", virtio_gpu_debugfs_irq_info, 0, NULL },
> +};
> +
> +#define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list)
> +
> +int
> +virtio_gpu_debugfs_init(struct drm_minor *minor)
> +{
> +	drm_debugfs_create_files(virtio_gpu_debugfs_list,
> +				 VIRTIO_GPU_DEBUGFS_ENTRIES,
> +				 minor->debugfs_root, minor);
> +	return 0;
> +}
> +
> +void
> +virtio_gpu_debugfs_takedown(struct drm_minor *minor)
> +{
> +	drm_debugfs_remove_files(virtio_gpu_debugfs_list,
> +				 VIRTIO_GPU_DEBUGFS_ENTRIES,
> +				 minor);
> +}
> diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
> new file mode 100644
> index 0000000..578a02c
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_display.c
> @@ -0,0 +1,527 @@
> +/*
> + * Copyright 2013 Red Hat Inc.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice shall be included in
> + * all copies or substantial portions of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> + * OTHER DEALINGS IN THE SOFTWARE.
> + *
> + * Authors: Dave Airlie
> + *          Alon Levy
> + */
> +
> +#include "virtgpu_drv.h"
> +#include <drm/drm_crtc_helper.h>
> +#include <drm/drm_plane_helper.h>
> +
> +#define XRES_MIN   320
> +#define YRES_MIN   200
> +
> +#define XRES_DEF  1024
> +#define YRES_DEF   768
> +
> +#define XRES_MAX  8192
> +#define YRES_MAX  8192
> +
> +static void virtio_gpu_crtc_gamma_set(struct drm_crtc *crtc,
> +				      u16 *red, u16 *green, u16 *blue,
> +				      uint32_t start, uint32_t size)
> +{
> +	/* TODO */
> +}
> +
> +static void
> +virtio_gpu_hide_cursor(struct virtio_gpu_device *vgdev,
> +		       struct virtio_gpu_output *output)
> +{
> +	output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
> +	output->cursor.resource_id = 0;
> +	virtio_gpu_cursor_ping(vgdev, output);
> +}
> +
> +static int virtio_gpu_crtc_cursor_set(struct drm_crtc *crtc,
> +				      struct drm_file *file_priv,
> +				      uint32_t handle,
> +				      uint32_t width,
> +				      uint32_t height,
> +				      int32_t hot_x, int32_t hot_y)
> +{
> +	struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
> +	struct virtio_gpu_output *output =
> +		container_of(crtc, struct virtio_gpu_output, crtc);
> +	struct drm_gem_object *gobj = NULL;
> +	struct virtio_gpu_object *qobj = NULL;
> +	struct virtio_gpu_fence *fence = NULL;
> +	int ret = 0;
> +
> +	if (handle == 0) {
> +		virtio_gpu_hide_cursor(vgdev, output);
> +		return 0;
> +	}
> +
> +	/* lookup the cursor */
> +	gobj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
> +	if (gobj == NULL)
> +		return -ENOENT;
> +
> +	qobj = gem_to_virtio_gpu_obj(gobj);
> +
> +	if (!qobj->hw_res_handle) {
> +		ret = -EINVAL;
> +		goto out;
> +	}
> +
> +	ret = virtio_gpu_cmd_transfer_to_host_2d(vgdev, qobj->hw_res_handle, 0,
> +						 cpu_to_le32(64),
> +						 cpu_to_le32(64),
> +						 0, 0, &fence);
> +	if (!ret) {
> +		reservation_object_add_excl_fence(qobj->tbo.resv,
> +						  &fence->f);
> +		virtio_gpu_object_wait(qobj, false);
> +	}
> +
> +	output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
> +	output->cursor.resource_id = cpu_to_le32(qobj->hw_res_handle);
> +	output->cursor.hot_x = cpu_to_le32(hot_x);
> +	output->cursor.hot_y = cpu_to_le32(hot_y);
> +	virtio_gpu_cursor_ping(vgdev, output);
> +out:
> +	drm_gem_object_unreference_unlocked(gobj);
> +	return ret;
> +}
> +
> +static int virtio_gpu_crtc_cursor_move(struct drm_crtc *crtc,
> +				    int x, int y)
> +{
> +	struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
> +	struct virtio_gpu_output *output =
> +		container_of(crtc, struct virtio_gpu_output, crtc);
> +
> +	output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
> +	output->cursor.pos.x = cpu_to_le32(x);
> +	output->cursor.pos.y = cpu_to_le32(y);
> +	virtio_gpu_cursor_ping(vgdev, output);
> +	return 0;
> +}
> +
> +static int virtio_gpu_crtc_page_flip(struct drm_crtc *crtc,
> +				     struct drm_framebuffer *fb,
> +				     struct drm_pending_vblank_event *event,
> +				     uint32_t flags)
> +{
> +	return -EINVAL;
> +}
> +
> +
> +static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
> +	.cursor_set2 = virtio_gpu_crtc_cursor_set,
> +	.cursor_move = virtio_gpu_crtc_cursor_move,
> +	.gamma_set = virtio_gpu_crtc_gamma_set,
> +	.set_config = drm_crtc_helper_set_config,
> +	.page_flip = virtio_gpu_crtc_page_flip,
> +	.destroy = drm_crtc_cleanup,
> +};
> +
> +static void virtio_gpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
> +{
> +	struct virtio_gpu_framebuffer *virtio_gpu_fb
> +		= to_virtio_gpu_framebuffer(fb);
> +
> +	if (virtio_gpu_fb->obj)
> +		drm_gem_object_unreference_unlocked(virtio_gpu_fb->obj);
> +	drm_framebuffer_cleanup(fb);
> +	kfree(virtio_gpu_fb);
> +}
> +
> +static int
> +virtio_gpu_framebuffer_surface_dirty(struct drm_framebuffer *fb,
> +				     struct drm_file *file_priv,
> +				     unsigned flags, unsigned color,
> +				     struct drm_clip_rect *clips,
> +				     unsigned num_clips)
> +{
> +	struct virtio_gpu_framebuffer *virtio_gpu_fb
> +		= to_virtio_gpu_framebuffer(fb);
> +
> +	return virtio_gpu_surface_dirty(virtio_gpu_fb, clips, num_clips);
> +}
> +
> +static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
> +	.destroy = virtio_gpu_user_framebuffer_destroy,
> +	.dirty = virtio_gpu_framebuffer_surface_dirty,
> +};
> +
> +int
> +virtio_gpu_framebuffer_init(struct drm_device *dev,
> +			    struct virtio_gpu_framebuffer *vgfb,
> +			    struct drm_mode_fb_cmd2 *mode_cmd,
> +			    struct drm_gem_object *obj)
> +{
> +	int ret;
> +	struct virtio_gpu_object *bo;
> +	vgfb->obj = obj;
> +
> +	bo = gem_to_virtio_gpu_obj(obj);
> +
> +	ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs);
> +	if (ret) {
> +		vgfb->obj = NULL;
> +		return ret;
> +	}
> +	drm_helper_mode_fill_fb_struct(&vgfb->base, mode_cmd);
> +
> +	spin_lock_init(&vgfb->dirty_lock);
> +	vgfb->x1 = vgfb->y1 = INT_MAX;
> +	vgfb->x2 = vgfb->y2 = 0;
> +	return 0;
> +}
> +
> +static void virtio_gpu_crtc_dpms(struct drm_crtc *crtc, int mode)
> +{
> +}
> +
> +static bool virtio_gpu_crtc_mode_fixup(struct drm_crtc *crtc,
> +				       const struct drm_display_mode *mode,
> +				       struct drm_display_mode *adjusted_mode)
> +{
> +	return true;
> +}
> +
> +static int virtio_gpu_crtc_mode_set(struct drm_crtc *crtc,
> +				    struct drm_display_mode *mode,
> +				    struct drm_display_mode *adjusted_mode,
> +				    int x, int y,
> +				    struct drm_framebuffer *old_fb)
> +{
> +	struct drm_device *dev = crtc->dev;
> +	struct virtio_gpu_device *vgdev = dev->dev_private;
> +	struct virtio_gpu_framebuffer *vgfb;
> +	struct virtio_gpu_object *bo, *old_bo = NULL;
> +	struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
> +
> +	if (!crtc->primary->fb) {
> +		DRM_DEBUG_KMS("No FB bound\n");
> +		return 0;
> +	}
> +
> +	if (old_fb) {
> +		vgfb = to_virtio_gpu_framebuffer(old_fb);
> +		old_bo = gem_to_virtio_gpu_obj(vgfb->obj);
> +	}
> +	vgfb = to_virtio_gpu_framebuffer(crtc->primary->fb);
> +	bo = gem_to_virtio_gpu_obj(vgfb->obj);
> +	DRM_DEBUG("+%d+%d (%d,%d) => (%d,%d)\n",
> +		  x, y,
> +		  mode->hdisplay, mode->vdisplay,
> +		  adjusted_mode->hdisplay,
> +		  adjusted_mode->vdisplay);
> +
> +	virtio_gpu_cmd_set_scanout(vgdev, output->index, bo->hw_res_handle,
> +				mode->hdisplay, mode->vdisplay, x, y);
> +
> +	return 0;
> +}
> +
> +static void virtio_gpu_crtc_prepare(struct drm_crtc *crtc)
> +{
> +	DRM_DEBUG("current: %dx%d+%d+%d (%d).\n",
> +		  crtc->mode.hdisplay, crtc->mode.vdisplay,
> +		  crtc->x, crtc->y, crtc->enabled);
> +}
> +
> +static void virtio_gpu_crtc_commit(struct drm_crtc *crtc)
> +{
> +	DRM_DEBUG("\n");
> +}
> +
> +static void virtio_gpu_crtc_load_lut(struct drm_crtc *crtc)
> +{
> +}
> +
> +static void virtio_gpu_crtc_disable(struct drm_crtc *crtc)
> +{
> +	struct drm_device *dev = crtc->dev;
> +	struct virtio_gpu_device *vgdev = dev->dev_private;
> +	struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
> +
> +	virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
> +}
> +
> +static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
> +	.disable = virtio_gpu_crtc_disable,
> +	.dpms = virtio_gpu_crtc_dpms,
> +	.mode_fixup = virtio_gpu_crtc_mode_fixup,
> +	.mode_set = virtio_gpu_crtc_mode_set,
> +	.prepare = virtio_gpu_crtc_prepare,
> +	.commit = virtio_gpu_crtc_commit,
> +	.load_lut = virtio_gpu_crtc_load_lut,
> +};
> +
> +static void virtio_gpu_enc_dpms(struct drm_encoder *encoder, int mode)
> +{
> +}
> +
> +static bool virtio_gpu_enc_mode_fixup(struct drm_encoder *encoder,
> +				      const struct drm_display_mode *mode,
> +				      struct drm_display_mode *adjusted_mode)
> +{
> +	return true;
> +}
> +
> +static void virtio_gpu_enc_prepare(struct drm_encoder *encoder)
> +{
> +}
> +
> +static void virtio_gpu_enc_commit(struct drm_encoder *encoder)
> +{
> +}
> +
> +static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder,
> +				    struct drm_display_mode *mode,
> +				    struct drm_display_mode *adjusted_mode)
> +{
> +}
> +
> +static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
> +{
> +	struct virtio_gpu_output *output =
> +		drm_connector_to_virtio_gpu_output(connector);
> +	struct drm_display_mode *mode = NULL;
> +	int count, width, height;
> +
> +	width  = le32_to_cpu(output->info.r.width);
> +	height = le32_to_cpu(output->info.r.height);
> +	count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
> +
> +	if (width == 0 || height == 0) {
> +		width = XRES_DEF;
> +		height = YRES_DEF;
> +		drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
> +	} else {
> +		DRM_DEBUG("add mode: %dx%d\n", width, height);
> +		mode = drm_cvt_mode(connector->dev, width, height, 60,
> +				    false, false, false);
> +		mode->type |= DRM_MODE_TYPE_PREFERRED;
> +		drm_mode_probed_add(connector, mode);
> +		count++;
> +	}
> +
> +	return count;
> +}
> +
> +static int virtio_gpu_conn_mode_valid(struct drm_connector *connector,
> +				      struct drm_display_mode *mode)
> +{
> +	struct virtio_gpu_output *output =
> +		drm_connector_to_virtio_gpu_output(connector);
> +	int width, height;
> +
> +	width  = le32_to_cpu(output->info.r.width);
> +	height = le32_to_cpu(output->info.r.height);
> +
> +	if (!(mode->type & DRM_MODE_TYPE_PREFERRED))
> +		return MODE_OK;
> +	if (mode->hdisplay == XRES_DEF && mode->vdisplay == YRES_DEF)
> +		return MODE_OK;
> +	if (mode->hdisplay <= width  && mode->hdisplay >= width - 16 &&
> +	    mode->vdisplay <= height && mode->vdisplay >= height - 16)
> +		return MODE_OK;
> +
> +	DRM_DEBUG("del mode: %dx%d\n", mode->hdisplay, mode->vdisplay);
> +	return MODE_BAD;
> +}
> +
> +static struct drm_encoder*
> +virtio_gpu_best_encoder(struct drm_connector *connector)
> +{
> +	struct virtio_gpu_output *virtio_gpu_output =
> +		drm_connector_to_virtio_gpu_output(connector);
> +
> +	return &virtio_gpu_output->enc;
> +}
> +
> +
> +static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = {
> +	.dpms = virtio_gpu_enc_dpms,
> +	.mode_fixup = virtio_gpu_enc_mode_fixup,
> +	.prepare = virtio_gpu_enc_prepare,
> +	.mode_set = virtio_gpu_enc_mode_set,
> +	.commit = virtio_gpu_enc_commit,
> +};
> +
> +static const struct drm_connector_helper_funcs virtio_gpu_conn_helper_funcs = {
> +	.get_modes = virtio_gpu_conn_get_modes,
> +	.mode_valid = virtio_gpu_conn_mode_valid,
> +	.best_encoder = virtio_gpu_best_encoder,
> +};
> +
> +static void virtio_gpu_conn_save(struct drm_connector *connector)
> +{
> +	DRM_DEBUG("\n");
> +}
> +
> +static void virtio_gpu_conn_restore(struct drm_connector *connector)
> +{
> +	DRM_DEBUG("\n");
> +}
> +
> +static enum drm_connector_status virtio_gpu_conn_detect(
> +			struct drm_connector *connector,
> +			bool force)
> +{
> +	struct virtio_gpu_output *output =
> +		drm_connector_to_virtio_gpu_output(connector);
> +
> +	if (output->info.enabled)
> +		return connector_status_connected;
> +	else
> +		return connector_status_disconnected;
> +}
> +
> +static int virtio_gpu_conn_set_property(struct drm_connector *connector,
> +				   struct drm_property *property,
> +				   uint64_t value)
> +{
> +	DRM_DEBUG("\n");
> +	return 0;
> +}
> +
> +static void virtio_gpu_conn_destroy(struct drm_connector *connector)
> +{
> +	struct virtio_gpu_output *virtio_gpu_output =
> +		drm_connector_to_virtio_gpu_output(connector);
> +
> +	drm_connector_unregister(connector);
> +	drm_connector_cleanup(connector);
> +	kfree(virtio_gpu_output);
> +}
> +
> +static const struct drm_connector_funcs virtio_gpu_connector_funcs = {
> +	.dpms = drm_helper_connector_dpms,
> +	.save = virtio_gpu_conn_save,
> +	.restore = virtio_gpu_conn_restore,
> +	.detect = virtio_gpu_conn_detect,
> +	.fill_modes = drm_helper_probe_single_connector_modes,
> +	.set_property = virtio_gpu_conn_set_property,
> +	.destroy = virtio_gpu_conn_destroy,
> +};
> +
> +static const struct drm_encoder_funcs virtio_gpu_enc_funcs = {
> +	.destroy = drm_encoder_cleanup,
> +};
> +
> +static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
> +{
> +	struct drm_device *dev = vgdev->ddev;
> +	struct virtio_gpu_output *output = vgdev->outputs + index;
> +	struct drm_connector *connector = &output->conn;
> +	struct drm_encoder *encoder = &output->enc;
> +	struct drm_crtc *crtc = &output->crtc;
> +
> +	output->index = index;
> +	if (index == 0) {
> +		output->info.enabled = cpu_to_le32(true);
> +		output->info.r.width = cpu_to_le32(XRES_DEF);
> +		output->info.r.height = cpu_to_le32(YRES_DEF);
> +	}
> +
> +	drm_crtc_init(dev, crtc, &virtio_gpu_crtc_funcs);
> +	drm_mode_crtc_set_gamma_size(crtc, 256);
> +	drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
> +
> +	drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
> +			   DRM_MODE_CONNECTOR_VIRTUAL);
> +	connector->polled = DRM_CONNECTOR_POLL_HPD;
> +	drm_encoder_init(dev, encoder, &virtio_gpu_enc_funcs,
> +			 DRM_MODE_ENCODER_VIRTUAL);
> +
> +	encoder->possible_crtcs = 1 << index;
> +	drm_mode_connector_attach_encoder(connector, encoder);
> +	drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs);
> +	drm_connector_helper_add(connector, &virtio_gpu_conn_helper_funcs);
> +	drm_connector_register(connector);
> +	return 0;
> +}
> +
> +static struct drm_framebuffer *
> +virtio_gpu_user_framebuffer_create(struct drm_device *dev,
> +				   struct drm_file *file_priv,
> +				   struct drm_mode_fb_cmd2 *mode_cmd)
> +{
> +	struct drm_gem_object *obj = NULL;
> +	struct virtio_gpu_framebuffer *virtio_gpu_fb;
> +	int ret;
> +
> +	/* lookup object associated with res handle */
> +	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
> +	if (!obj)
> +		return ERR_PTR(-EINVAL);
> +
> +	virtio_gpu_fb = kzalloc(sizeof(*virtio_gpu_fb), GFP_KERNEL);
> +	if (virtio_gpu_fb == NULL)
> +		return ERR_PTR(-ENOMEM);
> +
> +	ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
> +	if (ret) {
> +		kfree(virtio_gpu_fb);
> +		if (obj)
> +			drm_gem_object_unreference_unlocked(obj);
> +		return NULL;
> +	}
> +
> +	return &virtio_gpu_fb->base;
> +}
> +
> +static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = {
> +	.fb_create = virtio_gpu_user_framebuffer_create,
> +};
> +
> +int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
> +{
> +	int i;
> +	int ret;
> +
> +	drm_mode_config_init(vgdev->ddev);
> +	vgdev->ddev->mode_config.funcs = (void *)&virtio_gpu_mode_funcs;
> +
> +	/* modes will be validated against the framebuffer size */
> +	vgdev->ddev->mode_config.min_width = XRES_MIN;
> +	vgdev->ddev->mode_config.min_height = YRES_MIN;
> +	vgdev->ddev->mode_config.max_width = XRES_MAX;
> +	vgdev->ddev->mode_config.max_height = YRES_MAX;
> +
> +	for (i = 0 ; i < vgdev->num_scanouts; ++i)
> +		vgdev_output_init(vgdev, i);
> +
> +	/* primary surface must be created by this point, to allow
> +	 * issuing command queue commands and having them read by
> +	 * spice server. */
> +	ret = virtio_gpu_fbdev_init(vgdev);
> +	if (ret)
> +		return ret;
> +
> +	ret = drm_vblank_init(vgdev->ddev, vgdev->num_scanouts);
> +
> +	drm_kms_helper_poll_init(vgdev->ddev);
> +	return ret;
> +}
> +
> +void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
> +{
> +	virtio_gpu_fbdev_fini(vgdev);
> +	drm_mode_config_cleanup(vgdev->ddev);
> +}
> diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
> new file mode 100644
> index 0000000..e4b50af
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
> @@ -0,0 +1,68 @@
> +#include <linux/pci.h>
> +
> +#include "virtgpu_drv.h"
> +
> +int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master)
> +{
> +	struct pci_dev *pdev = dev->pdev;
> +
> +	if (pdev) {
> +		return drm_pci_set_busid(dev, master);
> +	}
> +	return 0;
> +}
> +
> +static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev)
> +{
> +	struct apertures_struct *ap;
> +	bool primary;
> +	ap = alloc_apertures(1);
> +	if (!ap)
> +		return;
> +
> +	ap->ranges[0].base = pci_resource_start(pci_dev, 2);
> +	ap->ranges[0].size = pci_resource_len(pci_dev, 2);
> +
> +	primary = pci_dev->resource[PCI_ROM_RESOURCE].flags
> +		& IORESOURCE_ROM_SHADOW;
> +
> +	remove_conflicting_framebuffers(ap, "virtiodrmfb", primary);
> +
> +	kfree(ap);
> +}
> +
> +int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
> +{
> +	struct drm_device *dev;
> +	int ret;
> +
> +	dev = drm_dev_alloc(driver, &vdev->dev);
> +	if (!dev)
> +		return -ENOMEM;
> +	dev->virtdev = vdev;
> +	vdev->priv = dev;
> +
> +	if (strcmp(vdev->dev.parent->bus->name, "pci") == 0) {
> +		struct pci_dev *pdev = to_pci_dev(vdev->dev.parent);
> +		bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
> +		DRM_INFO("pci: %s detected\n",
> +			 vga ? "virtio-vga" : "virtio-gpu-pci");
> +		dev->pdev = pdev;
> +		if (vga)
> +			virtio_pci_kick_out_firmware_fb(pdev);
> +	}
> +
> +	ret = drm_dev_register(dev, 0);
> +	if (ret)
> +		goto err_free;
> +
> +	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
> +		 driver->major, driver->minor, driver->patchlevel,
> +		 driver->date, dev->primary->index);
> +
> +	return 0;
> +
> +err_free:
> +	drm_dev_unref(dev);
> +	return ret;
> +}
> diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
> new file mode 100644
> index 0000000..3662e86
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
> @@ -0,0 +1,132 @@
> +/*
> + * 2011 Red Hat, Inc.
> + * All Rights Reserved.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the next
> + * paragraph) shall be included in all copies or substantial portions of the
> + * Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> + * OTHER DEALINGS IN THE SOFTWARE.
> + *
> + * Authors:
> + *    Dave Airlie <airlied@redhat.com>
> + */
> +
> +#include <linux/module.h>
> +#include <linux/console.h>
> +#include <linux/pci.h>
> +#include "drmP.h"
> +#include "drm/drm.h"
> +
> +#include "virtgpu_drv.h"
> +static struct drm_driver driver;
> +
> +static int virtio_gpu_modeset = -1;
> +
> +MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
> +module_param_named(modeset, virtio_gpu_modeset, int, 0400);
> +
> +static int virtio_gpu_probe(struct virtio_device *vdev)
> +{
> +#ifdef CONFIG_VGA_CONSOLE
> +	if (vgacon_text_force() && virtio_gpu_modeset == -1)
> +		return -EINVAL;
> +#endif
> +
> +	if (virtio_gpu_modeset == 0)
> +		return -EINVAL;
> +
> +	return drm_virtio_init(&driver, vdev);
> +}
> +
> +static void virtio_gpu_remove(struct virtio_device *vdev)
> +{
> +	struct drm_device *dev = vdev->priv;
> +	drm_put_dev(dev);
> +}
> +
> +static void virtio_gpu_config_changed(struct virtio_device *vdev)
> +{
> +	struct drm_device *dev = vdev->priv;
> +	struct virtio_gpu_device *vgdev = dev->dev_private;
> +
> +	schedule_work(&vgdev->config_changed_work);
> +}
> +
> +static struct virtio_device_id id_table[] = {
> +	{ VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID },
> +	{ 0 },
> +};
> +
> +static unsigned int features[] = {
> +};
> +static struct virtio_driver virtio_gpu_driver = {
> +	.feature_table = features,
> +	.feature_table_size = ARRAY_SIZE(features),
> +	.driver.name = KBUILD_MODNAME,
> +	.driver.owner = THIS_MODULE,
> +	.id_table = id_table,
> +	.probe = virtio_gpu_probe,
> +	.remove = virtio_gpu_remove,
> +	.config_changed = virtio_gpu_config_changed
> +};
> +
> +module_virtio_driver(virtio_gpu_driver);
> +
> +MODULE_DEVICE_TABLE(virtio, id_table);
> +MODULE_DESCRIPTION("Virtio GPU driver");
> +MODULE_LICENSE("GPL");
> +
> +static const struct file_operations virtio_gpu_driver_fops = {
> +	.owner = THIS_MODULE,
> +	.open = drm_open,
> +	.mmap = virtio_gpu_mmap,
> +	.poll = drm_poll,
> +	.read = drm_read,
> +	.unlocked_ioctl	= drm_ioctl,
> +	.release = drm_release,
> +#ifdef CONFIG_COMPAT
> +	.compat_ioctl = drm_compat_ioctl,
> +#endif
> +	.llseek = noop_llseek,
> +};
> +
> +
> +static struct drm_driver driver = {
> +	.driver_features = DRIVER_MODESET | DRIVER_GEM,
> +	.set_busid = drm_virtio_set_busid,
> +	.load = virtio_gpu_driver_load,
> +	.unload = virtio_gpu_driver_unload,
> +
> +	.dumb_create = virtio_gpu_mode_dumb_create,
> +	.dumb_map_offset = virtio_gpu_mode_dumb_mmap,
> +	.dumb_destroy = virtio_gpu_mode_dumb_destroy,
> +
> +#if defined(CONFIG_DEBUG_FS)
> +	.debugfs_init = virtio_gpu_debugfs_init,
> +	.debugfs_cleanup = virtio_gpu_debugfs_takedown,
> +#endif
> +
> +	.gem_free_object = virtio_gpu_gem_free_object,
> +	.fops = &virtio_gpu_driver_fops,
> +
> +	.name = DRIVER_NAME,
> +	.desc = DRIVER_DESC,
> +	.date = DRIVER_DATE,
> +	.major = DRIVER_MAJOR,
> +	.minor = DRIVER_MINOR,
> +	.patchlevel = DRIVER_PATCHLEVEL,
> +};
> diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
> new file mode 100644
> index 0000000..6082ec3
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
> @@ -0,0 +1,326 @@
> +/*
> + * Copyright (C) 2012 Red Hat
> + *
> + * This file is subject to the terms and conditions of the GNU General Public
> + * License v2. See the file COPYING in the main directory of this archive for
> + * more details.
> + */
> +
> +#ifndef VIRTIO_DRV_H
> +#define VIRTIO_DRV_H
> +
> +#include <linux/virtio.h>
> +#include <linux/virtio_ids.h>
> +#include <linux/virtio_config.h>
> +#include <linux/virtio_gpu.h>
> +
> +#include <drm/drmP.h>
> +#include <drm/drm_gem.h>
> +#include <drm/drm_crtc_helper.h>
> +#include <ttm/ttm_bo_api.h>
> +#include <ttm/ttm_bo_driver.h>
> +#include <ttm/ttm_placement.h>
> +#include <ttm/ttm_module.h>
> +
> +#define DRIVER_NAME "virtio_gpu"
> +#define DRIVER_DESC "virtio GPU"
> +#define DRIVER_DATE "0"
> +
> +#define DRIVER_MAJOR 0
> +#define DRIVER_MINOR 0
> +#define DRIVER_PATCHLEVEL 1
> +
> +/* virtgpu_drm_bus.c */
> +int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master);
> +int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
> +
> +struct virtio_gpu_object {
> +	struct drm_gem_object gem_base;
> +	uint32_t hw_res_handle;
> +
> +	struct sg_table *pages;
> +	void *vmap;
> +	bool dumb;
> +	struct ttm_place                placement_code;
> +	struct ttm_placement		placement;
> +	struct ttm_buffer_object	tbo;
> +	struct ttm_bo_kmap_obj		kmap;
> +};
> +#define gem_to_virtio_gpu_obj(gobj) \
> +	container_of((gobj), struct virtio_gpu_object, gem_base)
> +
> +struct virtio_gpu_vbuffer;
> +struct virtio_gpu_device;
> +
> +typedef void (*virtio_gpu_resp_cb)(struct virtio_gpu_device *vgdev,
> +				   struct virtio_gpu_vbuffer *vbuf);
> +
> +struct virtio_gpu_fence_driver {
> +	atomic64_t       last_seq;
> +	uint64_t         sync_seq;
> +	struct list_head fences;
> +	spinlock_t       lock;
> +};
> +
> +struct virtio_gpu_fence {
> +	struct fence f;
> +	struct virtio_gpu_fence_driver *drv;
> +	struct list_head node;
> +	uint64_t seq;
> +};
> +#define to_virtio_fence(x) \
> +	container_of(x, struct virtio_gpu_fence, f)
> +
> +struct virtio_gpu_vbuffer {
> +	char *buf;
> +	int size;
> +	bool debug_dump_sglists;
> +
> +	void *data_buf;
> +	uint32_t data_size;
> +
> +	char *resp_buf;
> +	int resp_size;
> +
> +	virtio_gpu_resp_cb resp_cb;
> +
> +	struct list_head destroy_list;
> +};
> +
> +struct virtio_gpu_output {
> +	int index;
> +	struct drm_crtc crtc;
> +	struct drm_connector conn;
> +	struct drm_encoder enc;
> +	struct virtio_gpu_display_one info;
> +	struct virtio_gpu_update_cursor cursor;
> +	int cur_x;
> +	int cur_y;
> +};
> +#define drm_crtc_to_virtio_gpu_output(x) \
> +	container_of(x, struct virtio_gpu_output, crtc)
> +#define drm_connector_to_virtio_gpu_output(x) \
> +	container_of(x, struct virtio_gpu_output, conn)
> +#define drm_encoder_to_virtio_gpu_output(x) \
> +	container_of(x, struct virtio_gpu_output, enc)
> +
> +struct virtio_gpu_framebuffer {
> +	struct drm_framebuffer base;
> +	struct drm_gem_object *obj;
> +	int x1, y1, x2, y2; /* dirty rect */
> +	spinlock_t dirty_lock;
> +	uint32_t hw_res_handle;
> +};
> +#define to_virtio_gpu_framebuffer(x) \
> +	container_of(x, struct virtio_gpu_framebuffer, base)
> +
> +struct virtio_gpu_mman {
> +	struct ttm_bo_global_ref        bo_global_ref;
> +	struct drm_global_reference	mem_global_ref;
> +	bool				mem_global_referenced;
> +	struct ttm_bo_device		bdev;
> +};
> +
> +struct virtio_gpu_fbdev;
> +
> +struct virtio_gpu_queue {
> +	struct virtqueue *vq;
> +	spinlock_t qlock;
> +	wait_queue_head_t ack_queue;
> +	struct work_struct dequeue_work;
> +};
> +
> +struct virtio_gpu_device {
> +	struct device *dev;
> +	struct drm_device *ddev;
> +
> +	struct virtio_device *vdev;
> +
> +	struct virtio_gpu_mman mman;
> +
> +	/* pointer to fbdev info structure */
> +	struct virtio_gpu_fbdev *vgfbdev;
> +	struct virtio_gpu_output outputs[VIRTIO_GPU_MAX_SCANOUTS];
> +	uint32_t num_scanouts;
> +
> +	struct virtio_gpu_queue ctrlq;
> +	struct virtio_gpu_queue cursorq;
> +
> +	struct idr	resource_idr;
> +	spinlock_t resource_idr_lock;
> +
> +	wait_queue_head_t resp_wq;
> +	/* current display info */
> +	spinlock_t display_info_lock;
> +
> +	struct virtio_gpu_fence_driver fence_drv;
> +
> +	struct idr	ctx_id_idr;
> +	spinlock_t ctx_id_idr_lock;
> +
> +	struct work_struct config_changed_work;
> +};
> +
> +struct virtio_gpu_fpriv {
> +	uint32_t ctx_id;
> +};
> +
> +/* virtio_ioctl.c */
> +#define DRM_VIRTIO_NUM_IOCTLS 10
> +extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
> +
> +/* virtio_kms.c */
> +int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags);
> +int virtio_gpu_driver_unload(struct drm_device *dev);
> +
> +/* virtio_gem.c */
> +void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj);
> +int virtio_gpu_gem_init(struct virtio_gpu_device *vgdev);
> +void virtio_gpu_gem_fini(struct virtio_gpu_device *vgdev);
> +int virtio_gpu_gem_create(struct drm_file *file,
> +			  struct drm_device *dev,
> +			  uint64_t size,
> +			  struct drm_gem_object **obj_p,
> +			  uint32_t *handle_p);
> +struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
> +						  size_t size, bool kernel,
> +						  bool pinned);
> +int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
> +				struct drm_device *dev,
> +				struct drm_mode_create_dumb *args);
> +int virtio_gpu_mode_dumb_destroy(struct drm_file *file_priv,
> +				 struct drm_device *dev,
> +				 uint32_t handle);
> +int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
> +			      struct drm_device *dev,
> +			      uint32_t handle, uint64_t *offset_p);
> +
> +/* virtio_fb */
> +#define VIRTIO_GPUFB_CONN_LIMIT 1
> +int virtio_gpu_fbdev_init(struct virtio_gpu_device *vgdev);
> +void virtio_gpu_fbdev_fini(struct virtio_gpu_device *vgdev);
> +int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *qfb,
> +			     struct drm_clip_rect *clips,
> +			     unsigned num_clips);
> +/* virtio vg */
> +int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
> +			       uint32_t *resid);
> +void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id);
> +int virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
> +				   uint32_t resource_id,
> +				   uint32_t format,
> +				   uint32_t width,
> +				   uint32_t height);
> +int virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
> +				  uint32_t resource_id);
> +int virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
> +				       uint32_t resource_id, uint64_t offset,
> +				       __le32 width, __le32 height,
> +				       __le32 x, __le32 y,
> +				       struct virtio_gpu_fence **fence);
> +int virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
> +				  uint32_t resource_id,
> +				  uint32_t x, uint32_t y,
> +				  uint32_t width, uint32_t height);
> +int virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
> +			       uint32_t scanout_id, uint32_t resource_id,
> +			       uint32_t width, uint32_t height,
> +			       uint32_t x, uint32_t y);
> +int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
> +			     struct virtio_gpu_object *obj,
> +			     uint32_t resource_id,
> +			     struct virtio_gpu_fence **fence);
> +int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
> +int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
> +void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
> +			    struct virtio_gpu_output *output);
> +int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev);
> +int virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
> +					  uint32_t resource_id);
> +void virtio_gpu_ctrl_ack(struct virtqueue *vq);
> +void virtio_gpu_cursor_ack(struct virtqueue *vq);
> +void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
> +void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
> +
> +/* virtio_gpu_display.c */
> +int virtio_gpu_framebuffer_init(struct drm_device *dev,
> +				struct virtio_gpu_framebuffer *vgfb,
> +				struct drm_mode_fb_cmd2 *mode_cmd,
> +				struct drm_gem_object *obj);
> +int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
> +void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
> +
> +/* virtio_gpu_ttm.c */
> +int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev);
> +void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
> +bool virtio_gpu_ttm_bo_is_virtio_gpu_object(struct ttm_buffer_object *bo);
> +int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
> +
> +/* virtio_gpu_fence.c */
> +int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
> +			  struct virtio_gpu_ctrl_hdr *cmd_hdr,
> +			  struct virtio_gpu_fence **fence);
> +void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
> +				    u64 last_seq);
> +
> +/* virtio_gpu_object */
> +int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
> +			     unsigned long size, bool kernel, bool pinned,
> +			     struct virtio_gpu_object **bo_ptr);
> +int virtio_gpu_object_kmap(struct virtio_gpu_object *bo, void **ptr);
> +int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
> +				   struct virtio_gpu_object *bo);
> +void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo);
> +int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
> +
> +static inline struct virtio_gpu_object*
> +virtio_gpu_object_ref(struct virtio_gpu_object *bo)
> +{
> +	ttm_bo_reference(&bo->tbo);
> +	return bo;
> +}
> +
> +static inline void virtio_gpu_object_unref(struct virtio_gpu_object **bo)
> +{
> +	struct ttm_buffer_object *tbo;
> +
> +	if ((*bo) == NULL)
> +		return;
> +	tbo = &((*bo)->tbo);
> +	ttm_bo_unref(&tbo);
> +	if (tbo == NULL)
> +		*bo = NULL;
> +}
> +
> +static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo)
> +{
> +	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
> +}
> +
> +static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo,
> +					 bool no_wait)
> +{
> +	int r;
> +
> +	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
> +	if (unlikely(r != 0)) {
> +		if (r != -ERESTARTSYS) {
> +			struct virtio_gpu_device *qdev =
> +				bo->gem_base.dev->dev_private;
> +			dev_err(qdev->dev, "%p reserve failed\n", bo);
> +		}
> +		return r;
> +	}
> +	return 0;
> +}
> +
> +static inline void virtio_gpu_object_unreserve(struct virtio_gpu_object *bo)
> +{
> +	ttm_bo_unreserve(&bo->tbo);
> +}
> +
> +/* virgl debugfs */
> +int virtio_gpu_debugfs_init(struct drm_minor *minor);
> +void virtio_gpu_debugfs_takedown(struct drm_minor *minor);
> +
> +#endif
> diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
> new file mode 100644
> index 0000000..1d79457
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
> @@ -0,0 +1,415 @@
> +#include <drm/drmP.h>
> +#include <drm/drm_fb_helper.h>
> +#include "virtgpu_drv.h"
> +
> +#define VIRTIO_GPU_FBCON_POLL_PERIOD (HZ / 60)
> +
> +struct virtio_gpu_fbdev {
> +	struct drm_fb_helper           helper;
> +	struct virtio_gpu_framebuffer  vgfb;
> +	struct list_head	       fbdev_list;
> +	struct virtio_gpu_device       *vgdev;
> +	struct delayed_work            work;
> +};
> +#define DL_ALIGN_UP(x, a) ALIGN(x, a)
> +#define DL_ALIGN_DOWN(x, a) ALIGN(x-(a-1), a)

DL_ALIGN_DOWN does not work if x < a.
Also, spaces around '-' are missing.

I would say just open-code the x / sizeof(long) * sizeof(long)
rounding below and drop both these macros.
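
E.g. (untested sketch; assumes x is non-negative, which holds for the
coordinates here):

	aligned_x = x / sizeof(unsigned long) * sizeof(unsigned long);
	width = ALIGN(width + (x - aligned_x), sizeof(unsigned long));
	x = aligned_x;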

> +
> +static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,
> +				   bool store, int x, int y,
> +				   int width, int height)
> +{
> +	struct drm_device *dev = fb->base.dev;
> +	struct virtio_gpu_device *vgdev = dev->dev_private;
> +	bool store_for_later = false;
> +	int aligned_x;
> +	int bpp = (fb->base.bits_per_pixel / 8);

Don't put () around the whole expression; int bpp = fb->base.bits_per_pixel / 8; is enough.

> +	int x2, y2;
> +	unsigned long flags;
> +	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->obj);
> +
> +	aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
> +	width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));

Missing spaces around '-' again.

> +	x = aligned_x;
> +
> +	if ((width <= 0) ||
> +	    (x + width > fb->base.width) ||
> +	    (y + height > fb->base.height)) {

You don't really need () around the comparisons when combining them with ||.


> +		DRM_DEBUG("values out of range %dx%d+%d+%d, fb %dx%d\n",
> +			  width, height, x, y,
> +			  fb->base.width, fb->base.height);
> +		return -EINVAL;
> +	}
> +
> +	/* if we are in atomic just store the info
> +	   can't test inside spin lock */
Should use the kernel style for multi-line comments here.
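
I.e., in the usual kernel form:

	/*
	 * If we are in atomic context, just store the info;
	 * we can't test inside the spin lock.
	 */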

> +	if (in_atomic() || store)
> +		store_for_later = true;

in_atomic() users are suspect; this needs a better comment.  What are
you trying to test for here?  It's usually best to just split up the
code, or to pass a flag from callers that know in which context they
are invoked, but maybe it's justified here.
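
A sketch of the explicit-flag variant (untested; it relies on the fbops
entry points below being the only callers that may run in atomic
context, and they already pass store == true):

	/* callers that may run in atomic context pass store == true */
	if (store)
		store_for_later = true;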

> +
> +	x2 = x + width - 1;
> +	y2 = y + height - 1;
> +
> +	spin_lock_irqsave(&fb->dirty_lock, flags);
> +
> +	if (fb->y1 < y)
> +		y = fb->y1;
> +	if (fb->y2 > y2)
> +		y2 = fb->y2;
> +	if (fb->x1 < x)
> +		x = fb->x1;
> +	if (fb->x2 > x2)
> +		x2 = fb->x2;
> +
> +	if (store_for_later) {
> +		fb->x1 = x;
> +		fb->x2 = x2;
> +		fb->y1 = y;
> +		fb->y2 = y2;
> +		spin_unlock_irqrestore(&fb->dirty_lock, flags);
> +		return 0;
> +	}
> +
> +	fb->x1 = fb->y1 = INT_MAX;
> +	fb->x2 = fb->y2 = 0;
> +
> +	spin_unlock_irqrestore(&fb->dirty_lock, flags);
> +
> +	{
> +		uint32_t offset;
> +		uint32_t w = x2 - x + 1;
> +		uint32_t h = y2 - y + 1;
> +
> +		offset = (y * fb->base.pitches[0]) + x * bpp;
> +
> +		virtio_gpu_cmd_transfer_to_host_2d(vgdev, obj->hw_res_handle,
> +						   offset,
> +						   cpu_to_le32(w),
> +						   cpu_to_le32(h),
> +						   cpu_to_le32(x),
> +						   cpu_to_le32(y),
> +						   NULL);
> +
> +	}
> +	virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle,
> +				      x, y, x2 - x + 1, y2 - y + 1);
> +	return 0;
> +}
> +
> +int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *vgfb,
> +			     struct drm_clip_rect *clips,
> +			     unsigned num_clips)
> +{
> +	struct virtio_gpu_device *vgdev = vgfb->base.dev->dev_private;
> +	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->obj);
> +	struct drm_clip_rect norect;
> +	struct drm_clip_rect *clips_ptr;
> +	int left, right, top, bottom;
> +	int i;
> +	int inc = 1;
> +	if (!num_clips) {
> +		num_clips = 1;
> +		clips = &norect;
> +		norect.x1 = norect.y1 = 0;
> +		norect.x2 = vgfb->base.width;
> +		norect.y2 = vgfb->base.height;
> +	}
> +	left = clips->x1;
> +	right = clips->x2;
> +	top = clips->y1;
> +	bottom = clips->y2;
> +
> +	/* skip the first clip rect */
> +	for (i = 1, clips_ptr = clips + inc;
> +	     i < num_clips; i++, clips_ptr += inc) {
> +		left = min_t(int, left, (int)clips_ptr->x1);
> +		right = max_t(int, right, (int)clips_ptr->x2);
> +		top = min_t(int, top, (int)clips_ptr->y1);
> +		bottom = max_t(int, bottom, (int)clips_ptr->y2);
> +	}
> +
> +	if (obj->dumb)
> +		return virtio_gpu_dirty_update(vgfb, false, left, top,
> +					       right - left, bottom - top);
> +
> +	virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle,
> +				      left, top, right - left, bottom - top);
> +	return 0;
> +}
> +
> +static void virtio_gpu_fb_dirty_work(struct work_struct *work)
> +{
> +	struct delayed_work *delayed_work = to_delayed_work(work);
> +	struct virtio_gpu_fbdev *vfbdev =
> +		container_of(delayed_work, struct virtio_gpu_fbdev, work);
> +	struct virtio_gpu_framebuffer *vgfb = &vfbdev->vgfb;
> +
> +	virtio_gpu_dirty_update(&vfbdev->vgfb, false, vgfb->x1, vgfb->y1,
> +				vgfb->x2 - vgfb->x1, vgfb->y2 - vgfb->y1);
> +}
> +
> +static void virtio_gpu_3d_fillrect(struct fb_info *info,
> +				   const struct fb_fillrect *rect)
> +{
> +	struct virtio_gpu_fbdev *vfbdev = info->par;
> +	sys_fillrect(info, rect);
> +	virtio_gpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy,
> +			     rect->width, rect->height);
> +	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
> +}
> +
> +static void virtio_gpu_3d_copyarea(struct fb_info *info,
> +				   const struct fb_copyarea *area)
> +{
> +	struct virtio_gpu_fbdev *vfbdev = info->par;
> +	sys_copyarea(info, area);
> +	virtio_gpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy,
> +			   area->width, area->height);
> +	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
> +}
> +
> +static void virtio_gpu_3d_imageblit(struct fb_info *info,
> +				    const struct fb_image *image)
> +{
> +	struct virtio_gpu_fbdev *vfbdev = info->par;
> +	sys_imageblit(info, image);
> +	virtio_gpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy,
> +			     image->width, image->height);
> +	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
> +}
> +
> +static struct fb_ops virtio_gpufb_ops = {
> +	.owner = THIS_MODULE,
> +	.fb_check_var = drm_fb_helper_check_var,
> +	.fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
> +	.fb_fillrect = virtio_gpu_3d_fillrect,
> +	.fb_copyarea = virtio_gpu_3d_copyarea,
> +	.fb_imageblit = virtio_gpu_3d_imageblit,
> +	.fb_pan_display = drm_fb_helper_pan_display,
> +	.fb_blank = drm_fb_helper_blank,
> +	.fb_setcmap = drm_fb_helper_setcmap,
> +	.fb_debug_enter = drm_fb_helper_debug_enter,
> +	.fb_debug_leave = drm_fb_helper_debug_leave,
> +};
> +
> +static int virtio_gpu_vmap_fb(struct virtio_gpu_device *vgdev,
> +			      struct virtio_gpu_object *obj)
> +{
> +	return virtio_gpu_object_kmap(obj, NULL);
> +}
> +
> +static int virtio_gpufb_create(struct drm_fb_helper *helper,
> +			       struct drm_fb_helper_surface_size *sizes)
> +{
> +	struct virtio_gpu_fbdev *vfbdev =
> +		container_of(helper, struct virtio_gpu_fbdev, helper);
> +	struct drm_device *dev = helper->dev;
> +	struct virtio_gpu_device *vgdev = dev->dev_private;
> +	struct fb_info *info;
> +	struct drm_framebuffer *fb;
> +	struct drm_mode_fb_cmd2 mode_cmd = {};
> +	struct virtio_gpu_object *obj;
> +	struct device *device = vgdev->dev;
> +	uint32_t resid, format, size;
> +	int ret;
> +
> +	if (sizes->surface_bpp == 24)
> +		sizes->surface_bpp = 32;
> +	mode_cmd.width = sizes->surface_width;
> +	mode_cmd.height = sizes->surface_height;
> +	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
> +	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
> +							  sizes->surface_depth);
> +
> +	switch (mode_cmd.pixel_format) {
> +#ifdef __BIG_ENDIAN
> +	case DRM_FORMAT_XRGB8888:
> +		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
> +		break;
> +	case DRM_FORMAT_ARGB8888:
> +		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
> +		break;
> +	case DRM_FORMAT_BGRX8888:
> +		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
> +		break;
> +	case DRM_FORMAT_BGRA8888:
> +		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
> +		break;
> +	case DRM_FORMAT_RGBX8888:
> +		format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
> +		break;
> +	case DRM_FORMAT_RGBA8888:
> +		format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
> +		break;
> +	case DRM_FORMAT_XBGR8888:
> +		format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
> +		break;
> +	case DRM_FORMAT_ABGR8888:
> +		format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
> +		break;
> +#else
> +	case DRM_FORMAT_XRGB8888:
> +		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
> +		break;
> +	case DRM_FORMAT_ARGB8888:
> +		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
> +		break;
> +	case DRM_FORMAT_BGRX8888:
> +		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
> +		break;
> +	case DRM_FORMAT_BGRA8888:
> +		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
> +		break;
> +	case DRM_FORMAT_RGBX8888:
> +		format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
> +		break;
> +	case DRM_FORMAT_RGBA8888:
> +		format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
> +		break;
> +	case DRM_FORMAT_XBGR8888:
> +		format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
> +		break;
> +	case DRM_FORMAT_ABGR8888:
> +		format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
> +		break;
> +#endif
> +	default:
> +		format = 0;
> +		break;
> +	}
> +	if (format == 0) {
> +		ret = -EINVAL;
> +		DRM_ERROR("failed to find virtio gpu format for %d\n",
> +			  mode_cmd.pixel_format);
> +		goto fail;
> +	}
> +
> +	size = mode_cmd.pitches[0] * mode_cmd.height;
> +	obj = virtio_gpu_alloc_object(dev, size, false, true);
> +	if (!obj) {
> +		ret = -ENOMEM;
> +		goto fail;
> +	}
> +
> +	ret = virtio_gpu_resource_id_get(vgdev, &resid);
> +	if (ret)
> +		goto fail;
> +
> +	ret = virtio_gpu_cmd_create_resource(vgdev, resid, format,
> +					  mode_cmd.width, mode_cmd.height);
> +	if (ret)
> +		goto fail;
> +
> +	ret = virtio_gpu_vmap_fb(vgdev, obj);
> +	if (ret) {
> +		DRM_ERROR("failed to vmap fb %d\n", ret);
> +		goto fail;
> +	}
> +
> +	/* attach the object to the resource */
> +	ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL);
> +	if (ret)
> +		goto fail;
> +
> +	info = framebuffer_alloc(0, device);
> +	if (!info) {
> +		ret = -ENOMEM;
> +		goto fail;
> +	}
> +
> +	info->par = helper;
> +
> +	ret = virtio_gpu_framebuffer_init(dev, &vfbdev->vgfb,
> +				       &mode_cmd, &obj->gem_base);
> +	if (ret)
> +		goto fail;
> +
> +	fb = &vfbdev->vgfb.base;
> +
> +	vfbdev->helper.fb = fb;
> +	vfbdev->helper.fbdev = info;
> +
> +	strcpy(info->fix.id, "virtiodrmfb");
> +	info->flags = FBINFO_DEFAULT;
> +	info->fbops = &virtio_gpufb_ops;
> +	info->pixmap.flags = FB_PIXMAP_SYSTEM;
> +	ret = fb_alloc_cmap(&info->cmap, 256, 0);
> +	if (ret) {
> +		ret = -ENOMEM;
> +		goto fail;
> +	}
> +
> +	info->screen_base = obj->vmap;
> +	info->screen_size = obj->gem_base.size;
> +	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
> +	drm_fb_helper_fill_var(info, &vfbdev->helper,
> +			       sizes->fb_width, sizes->fb_height);
> +
> +	info->fix.mmio_start = 0;
> +	info->fix.mmio_len = 0;
> +
> +	return 0;
> +fail:
> +

Seems too simple.  Shouldn't this clean up whatever it allocated?
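
Something along these lines (untested sketch; assumes obj, info and
resid are initialized to NULL/0 at the top of the function), returning
the actual error instead of the unconditional -EINVAL:

	fail:
		if (info)
			framebuffer_release(info);
		if (resid)
			virtio_gpu_resource_id_put(vgdev, resid);
		if (obj)
			virtio_gpu_object_unref(&obj);
		return ret;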

> +	return -EINVAL;
> +}
> +
> +static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
> +				    struct virtio_gpu_fbdev *vgfbdev)
> +{
> +	struct fb_info *info;
> +	struct virtio_gpu_framebuffer *vgfb = &vgfbdev->vgfb;
> +
> +	if (vgfbdev->helper.fbdev) {
> +		info = vgfbdev->helper.fbdev;
> +
> +		unregister_framebuffer(info);
> +		framebuffer_release(info);
> +	}
> +	if (vgfb->obj)
> +		vgfb->obj = NULL;
> +	drm_fb_helper_fini(&vgfbdev->helper);
> +	drm_framebuffer_cleanup(&vgfb->base);
> +
> +	return 0;
> +}
> +static struct drm_fb_helper_funcs virtio_gpu_fb_helper_funcs = {
> +	.fb_probe = virtio_gpufb_create,
> +};
> +
> +int virtio_gpu_fbdev_init(struct virtio_gpu_device *vgdev)
> +{
> +	struct virtio_gpu_fbdev *vgfbdev;
> +	int bpp_sel = 32; /* TODO: parameter from somewhere? */
> +	int ret;
> +
> +	vgfbdev = kzalloc(sizeof(struct virtio_gpu_fbdev), GFP_KERNEL);
> +	if (!vgfbdev)
> +		return -ENOMEM;
> +
> +	vgfbdev->vgdev = vgdev;
> +	vgdev->vgfbdev = vgfbdev;
> +	INIT_DELAYED_WORK(&vgfbdev->work, virtio_gpu_fb_dirty_work);
> +
> +	drm_fb_helper_prepare(vgdev->ddev, &vgfbdev->helper,
> +			      &virtio_gpu_fb_helper_funcs);
> +	ret = drm_fb_helper_init(vgdev->ddev, &vgfbdev->helper,
> +				 vgdev->num_scanouts,
> +				 VIRTIO_GPUFB_CONN_LIMIT);
> +	if (ret) {
> +		kfree(vgfbdev);
> +		return ret;
> +	}
> +
> +	drm_fb_helper_single_add_all_connectors(&vgfbdev->helper);
> +	drm_fb_helper_initial_config(&vgfbdev->helper, bpp_sel);
> +	return 0;
> +}
> +
> +void virtio_gpu_fbdev_fini(struct virtio_gpu_device *vgdev)
> +{
> +	if (!vgdev->vgfbdev)
> +		return;
> +
> +	virtio_gpu_fbdev_destroy(vgdev->ddev, vgdev->vgfbdev);
> +	kfree(vgdev->vgfbdev);
> +	vgdev->vgfbdev = NULL;
> +}
> diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
> new file mode 100644
> index 0000000..552aa49
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
> @@ -0,0 +1,95 @@
> +#include <drm/drmP.h>
> +#include "virtgpu_drv.h"
> +
> +static const char *virtio_get_driver_name(struct fence *f)
> +{
> +	return "virtio_gpu";
> +}
> +
> +static const char *virtio_get_timeline_name(struct fence *f)
> +{
> +	return "controlq";
> +}
> +
> +static bool virtio_enable_signaling(struct fence *f)
> +{
> +	return true;
> +}
> +
> +static bool virtio_signaled(struct fence *f)
> +{
> +	struct virtio_gpu_fence *fence = to_virtio_fence(f);
> +
> +	if (atomic64_read(&fence->drv->last_seq) >= fence->seq) {
> +		return true;
> +	}


Drop the {}; kernel style omits braces around a single statement.
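
In fact the whole body could simply be (untested):

	return atomic64_read(&fence->drv->last_seq) >= fence->seq;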

> +	return false;
> +}
> +
> +static void virtio_fence_value_str(struct fence *f, char *str, int size)
> +{
> +	struct virtio_gpu_fence *fence = to_virtio_fence(f);
> +
> +	snprintf(str, size, "%llu", fence->seq);
> +}
> +
> +static void virtio_timeline_value_str(struct fence *f, char *str, int size)
> +{
> +	struct virtio_gpu_fence *fence = to_virtio_fence(f);
> +
> +	snprintf(str, size, "%lu", atomic64_read(&fence->drv->last_seq));
> +}
> +
> +static const struct fence_ops virtio_fence_ops = {
> +	.get_driver_name     = virtio_get_driver_name,
> +	.get_timeline_name   = virtio_get_timeline_name,
> +	.enable_signaling    = virtio_enable_signaling,
> +	.signaled            = virtio_signaled,
> +	.wait                = fence_default_wait,
> +	.fence_value_str     = virtio_fence_value_str,
> +	.timeline_value_str  = virtio_timeline_value_str,
> +};
> +
> +int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
> +			  struct virtio_gpu_ctrl_hdr *cmd_hdr,
> +			  struct virtio_gpu_fence **fence)
> +{
> +	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
> +	unsigned long irq_flags;
> +
> +	*fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_KERNEL);
> +	if ((*fence) == NULL)
> +		return -ENOMEM;
> +
> +	spin_lock_irqsave(&drv->lock, irq_flags);
> +	(*fence)->drv = drv;
> +	(*fence)->seq = ++drv->sync_seq;
> +	fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
> +		   0, (*fence)->seq);
> +	fence_get(&(*fence)->f);
> +	list_add_tail(&(*fence)->node, &drv->fences);
> +	spin_unlock_irqrestore(&drv->lock, irq_flags);
> +
> +	cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
> +	cmd_hdr->fence_id = cpu_to_le64((*fence)->seq);
> +	return 0;
> +}
> +
> +void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
> +				    u64 last_seq)
> +{
> +	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
> +	struct virtio_gpu_fence *fence, *tmp;
> +	unsigned long irq_flags;
> +
> +	spin_lock_irqsave(&drv->lock, irq_flags);
> +	atomic64_set(&vgdev->fence_drv.last_seq, last_seq);
> +	list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
> +		if (last_seq < fence->seq)
> +			continue;
> +		fence_signal_locked(&fence->f);
> +		list_del(&fence->node);
> +		fence_put(&fence->f);
> +	}
> +	spin_unlock_irqrestore(&drv->lock, irq_flags);
> +}
> diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
> new file mode 100644
> index 0000000..8bc0a24
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
> @@ -0,0 +1,120 @@
> +
> +#include <drm/drmP.h>
> +#include "virtgpu_drv.h"
> +
> +void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj)
> +{
> +	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(gem_obj);
> +
> +	if (obj)
> +		virtio_gpu_object_unref(&obj);
> +}
> +
> +struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
> +						  size_t size, bool kernel,
> +						  bool pinned)
> +{
> +	struct virtio_gpu_device *vgdev = dev->dev_private;
> +	struct virtio_gpu_object *obj;
> +	int ret;
> +
> +	ret = virtio_gpu_object_create(vgdev, size, kernel, pinned, &obj);
> +	if (ret)
> +		return ERR_PTR(ret);
> +
> +	return obj;
> +}
> +
> +int virtio_gpu_gem_create(struct drm_file *file,
> +			  struct drm_device *dev,
> +			  uint64_t size,
> +			  struct drm_gem_object **obj_p,
> +			  uint32_t *handle_p)
> +{
> +	struct virtio_gpu_object *obj;
> +	int ret;
> +	u32 handle;
> +
> +	obj = virtio_gpu_alloc_object(dev, size, false, false);
> +	if (IS_ERR(obj))
> +		return PTR_ERR(obj);
> +
> +	ret = drm_gem_handle_create(file, &obj->gem_base, &handle);
> +	if (ret) {
> +		drm_gem_object_release(&obj->gem_base);
> +		return ret;
> +	}
> +
> +	*obj_p = &obj->gem_base;
> +
> +	/* drop reference from allocate - handle holds it now */
> +	drm_gem_object_unreference_unlocked(&obj->gem_base);
> +
> +	*handle_p = handle;
> +	return 0;
> +}
> +
> +int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
> +				struct drm_device *dev,
> +				struct drm_mode_create_dumb *args)
> +{
> +	struct virtio_gpu_device *vgdev = dev->dev_private;
> +	struct drm_gem_object *gobj;
> +	struct virtio_gpu_object *obj;
> +	int ret;
> +	uint32_t pitch;
> +	uint32_t resid;
> +
> +	pitch = args->width * ((args->bpp + 1) / 8);
> +	args->size = pitch * args->height;
> +	args->size = ALIGN(args->size, PAGE_SIZE);
> +
> +	ret = virtio_gpu_gem_create(file_priv, dev, args->size, &gobj,
> +				 &args->handle);
> +	if (ret)
> +		goto fail;
> +
> +	ret = virtio_gpu_resource_id_get(vgdev, &resid);
> +	if (ret)
> +		goto fail;
> +
> +	ret = virtio_gpu_cmd_create_resource(vgdev, resid,
> +					  2, args->width, args->height);
> +	if (ret)
> +		goto fail;
> +
> +	/* attach the object to the resource */
> +	obj = gem_to_virtio_gpu_obj(gobj);
> +	ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL);
> +	if (ret)
> +		goto fail;
> +
> +	obj->dumb = true;
> +	args->pitch = pitch;
> +	return ret;
> +fail:
> +	return ret;
> +}
> +
> +int virtio_gpu_mode_dumb_destroy(struct drm_file *file_priv,
> +				 struct drm_device *dev,
> +				 uint32_t handle)
> +{
> +	return drm_gem_handle_delete(file_priv, handle);
> +}
> +
> +int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
> +			      struct drm_device *dev,
> +			      uint32_t handle, uint64_t *offset_p)
> +{
> +	struct drm_gem_object *gobj;
> +	struct virtio_gpu_object *obj;
> +	BUG_ON(!offset_p);
> +	gobj = drm_gem_object_lookup(dev, file_priv, handle);
> +	if (gobj == NULL)
> +		return -ENOENT;
> +	obj = gem_to_virtio_gpu_obj(gobj);
> +	*offset_p = virtio_gpu_object_mmap_offset(obj);
> +	drm_gem_object_unreference_unlocked(gobj);
> +	return 0;
> +}
> diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
> new file mode 100644
> index 0000000..45c4beb
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
> @@ -0,0 +1,125 @@
> +#include <linux/virtio.h>
> +#include <linux/virtio_config.h>
> +#include <drm/drmP.h>
> +#include "virtgpu_drv.h"
> +
> +static void virtio_gpu_config_changed_work_func(struct work_struct *work)
> +{
> +	struct virtio_gpu_device *vgdev =
> +		container_of(work, struct virtio_gpu_device,
> +			     config_changed_work);
> +	u32 events_read, events_clear = 0;
> +
> +	/* read the config space */
> +	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
> +		     events_read, &events_read);
> +	if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
> +		virtio_gpu_cmd_get_display_info(vgdev);
> +		drm_helper_hpd_irq_event(vgdev->ddev);
> +		events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
> +	}
> +	virtio_cwrite(vgdev->vdev, struct virtio_gpu_config,
> +		      events_clear, &events_clear);
> +}
> +
> +static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
> +			       void (*work_func)(struct work_struct *work))
> +{
> +	spin_lock_init(&vgvq->qlock);
> +	init_waitqueue_head(&vgvq->ack_queue);
> +	INIT_WORK(&vgvq->dequeue_work, work_func);

Generally, you must flush workqueues on the cleanup path,
since work might still be pending.
Maybe it's not needed here; if so, that needs a comment.
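
Something like this in the unload path would do (untested sketch,
flushing the work items that are set up here and below):

	flush_work(&vgdev->ctrlq.dequeue_work);
	flush_work(&vgdev->cursorq.dequeue_work);
	flush_work(&vgdev->config_changed_work);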


> +}
> +
> +int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
> +{
> +	static vq_callback_t *callbacks[] = {
> +		virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
> +	};
> +	static const char *names[] = { "control", "cursor" };
> +
> +	struct virtio_gpu_device *vgdev;
> +	/* this will expand later */
> +	struct virtqueue *vqs[2];
> +	u32 num_scanouts;
> +	int ret;
> +
> +	if (!virtio_has_feature(dev->virtdev, VIRTIO_F_VERSION_1))
> +		return -ENODEV;
> +
> +	vgdev = kzalloc(sizeof(struct virtio_gpu_device), GFP_KERNEL);
> +	if (!vgdev)
> +		return -ENOMEM;
> +
> +	vgdev->ddev = dev;
> +	dev->dev_private = vgdev;
> +	vgdev->vdev = dev->virtdev;
> +	vgdev->dev = dev->dev;
> +
> +	spin_lock_init(&vgdev->display_info_lock);
> +	spin_lock_init(&vgdev->ctx_id_idr_lock);
> +	idr_init(&vgdev->ctx_id_idr);
> +	spin_lock_init(&vgdev->resource_idr_lock);
> +	idr_init(&vgdev->resource_idr);
> +	init_waitqueue_head(&vgdev->resp_wq);
> +	virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
> +	virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);
> +
> +	spin_lock_init(&vgdev->fence_drv.lock);
> +	INIT_LIST_HEAD(&vgdev->fence_drv.fences);
> +	INIT_WORK(&vgdev->config_changed_work,
> +		  virtio_gpu_config_changed_work_func);
> +
> +	ret = vgdev->vdev->config->find_vqs(vgdev->vdev, 2, vqs,
> +					    callbacks, names);
> +	if (ret) {
> +		DRM_ERROR("failed to find virt queues\n");
> +		goto err_vqs;
> +	}
> +	vgdev->ctrlq.vq = vqs[0];
> +	vgdev->cursorq.vq = vqs[1];
> +
> +	ret = virtio_gpu_ttm_init(vgdev);
> +	if (ret) {
> +		DRM_ERROR("failed to init ttm %d\n", ret);
> +		goto err_ttm;
> +	}
> +
> +	/* get display info */
> +	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
> +		     num_scanouts, &num_scanouts);
> +	vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
> +				    VIRTIO_GPU_MAX_SCANOUTS);
> +	if (!vgdev->num_scanouts) {
> +		DRM_ERROR("num_scanouts is zero\n");
> +		ret = -EINVAL;
> +		goto err_scanouts;
> +	}
> +
> +	ret = virtio_gpu_modeset_init(vgdev);
> +	if (ret)
> +		goto err_modeset;
> +
> +	virtio_device_ready(vgdev->vdev);
> +	virtio_gpu_cmd_get_display_info(vgdev);
> +	return 0;
> +
> +err_modeset:
> +err_scanouts:
> +	virtio_gpu_ttm_fini(vgdev);
> +err_ttm:
> +	vgdev->vdev->config->del_vqs(vgdev->vdev);
> +err_vqs:
> +	kfree(vgdev);
> +	return ret;
> +}
> +
> +int virtio_gpu_driver_unload(struct drm_device *dev)
> +{
> +	struct virtio_gpu_device *vgdev = dev->dev_private;
> +

Is the below safe to do while the device might be sending
us interrupts (before del_vqs)?  I didn't check, but it looks
suspicious.

OTOH you must also be careful not to do del_vqs
if your code might kick.

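For instance, resetting the device first stops it from sending
interrupts before the teardown below (sketch, following the usual
virtio remove ordering):

	vgdev->vdev->config->reset(vgdev->vdev);
	flush_work(&vgdev->config_changed_work);
	virtio_gpu_modeset_fini(vgdev);
	virtio_gpu_ttm_fini(vgdev);
	vgdev->vdev->config->del_vqs(vgdev->vdev);
	kfree(vgdev);
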
> +	virtio_gpu_modeset_fini(vgdev);
> +	virtio_gpu_ttm_fini(vgdev);
> +	vgdev->vdev->config->del_vqs(vgdev->vdev);
> +	kfree(vgdev);
> +	return 0;
> +}
> diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
> new file mode 100644
> index 0000000..0d98ae4
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_object.c
> @@ -0,0 +1,174 @@
> +#include "virtgpu_drv.h"
> +
> +static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
> +{
> +	struct virtio_gpu_object *bo;
> +	struct virtio_gpu_device *vgdev;
> +
> +	bo = container_of(tbo, struct virtio_gpu_object, tbo);
> +	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
> +
> +	if (bo->hw_res_handle)
> +		virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
> +	if (bo->pages)
> +		virtio_gpu_object_free_sg_table(bo);
> +	drm_gem_object_release(&bo->gem_base);
> +	kfree(bo);
> +}
> +
> +bool virtio_gpu_ttm_bo_is_virtio_gpu_object(struct ttm_buffer_object *bo)
> +{
> +	if (bo->destroy == &virtio_gpu_ttm_bo_destroy)
> +		return true;
> +	return false;
> +}

This function seems unused.

> +
> +static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo,
> +					  bool pinned)
> +{
> +	u32 c = 1;
> +	u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
> +
> +	vgbo->placement.placement = &vgbo->placement_code;
> +	vgbo->placement.busy_placement = &vgbo->placement_code;
> +	vgbo->placement_code.fpfn = 0;
> +	vgbo->placement_code.lpfn = 0;
> +	vgbo->placement_code.flags =
> +		TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT | pflag;
> +	vgbo->placement.num_placement = c;
> +	vgbo->placement.num_busy_placement = c;
> +
> +}
> +
> +int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
> +			     unsigned long size, bool kernel, bool pinned,
> +			     struct virtio_gpu_object **bo_ptr)
> +{
> +	struct virtio_gpu_object *bo;
> +	enum ttm_bo_type type;
> +	size_t acc_size;
> +	int r;
> +
> +	if (kernel)
> +		type = ttm_bo_type_kernel;
> +	else
> +		type = ttm_bo_type_device;
> +	*bo_ptr = NULL;
> +
> +	acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, size,
> +				       sizeof(struct virtio_gpu_object));
> +
> +	bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
> +	if (bo == NULL)
> +		return -ENOMEM;
> +	size = roundup(size, PAGE_SIZE);
> +	r = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
> +	if (unlikely(r)) {
> +		kfree(bo);
> +		return r;
> +	}
> +	bo->dumb = false;
> +
> +	virtio_gpu_init_ttm_placement(bo, pinned);
> +	r = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, size, type,
> +			&bo->placement, 0, !kernel, NULL, acc_size,
> +			NULL, NULL, &virtio_gpu_ttm_bo_destroy);
> +	if (unlikely(r != 0)) {
> +		if (r != -ERESTARTSYS)

That's unusual in the kernel.
What's this test in aid of? Needs a comment.
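
If the point is just avoiding log spam when the wait gets interrupted
by a signal, a comment along these lines would help (sketch):

	if (unlikely(r != 0)) {
		/* -ERESTARTSYS means we were interrupted by a signal
		 * while waiting; not an error worth logging. */
		if (r != -ERESTARTSYS)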

> +			dev_err(vgdev->dev,
> +				"object_init %d failed for (%lu)\n", r,
> +				size);
> +		return r;
> +	}
> +	*bo_ptr = bo;
> +	return 0;
> +}
> +
> +int virtio_gpu_object_kmap(struct virtio_gpu_object *bo, void **ptr)
> +{
> +	bool is_iomem;
> +	int r;
> +
> +	if (bo->vmap) {
> +		if (ptr)
> +			*ptr = bo->vmap;
> +		return 0;
> +	}
> +	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
> +	if (r)
> +		return r;
> +	bo->vmap = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
> +	if (ptr)
> +		*ptr = bo->vmap;
> +	return 0;
> +}
> +
> +#if 0

is this code useful?

> +void virtio_gpu_object_force_delete(struct virtio_gpu_device *vgdev)
> +{
> +	struct virtio_gpu_object *bo, *n;
> +
> +

Two empty lines here.

> +	dev_err(vgdev->dev, "Userspace still has active objects !\n");
> +	list_for_each_entry_safe(bo, n, &vgdev->gem.objects, list) {
> +		mutex_lock(&vgdev->ddev->struct_mutex);
> +		dev_err(vgdev->dev, "%p %p %lu %lu force free\n",
> +			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
> +			*((unsigned long *)&bo->gem_base.refcount));
> +		spin_lock(&vgdev->gem.lock);
> +		list_del_init(&bo->list);
> +		spin_unlock(&vgdev->gem.lock);
> +		/* this should unref the ttm bo */
> +		drm_gem_object_unreference(&bo->gem_base);
> +		mutex_unlock(&vgdev->ddev->struct_mutex);
> +	}
> +}
> +#endif
> +
> +int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
> +				   struct virtio_gpu_object *bo)
> +{
> +	int ret;
> +	struct page **pages = bo->tbo.ttm->pages;
> +	int nr_pages = bo->tbo.num_pages;
> +
> +	/* wtf swapping */
> +	if (bo->pages)
> +		return 0;
> +
> +	if (bo->tbo.ttm->state == tt_unpopulated)
> +		bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm);
> +	bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
> +	if (!bo->pages)
> +		goto out;
> +
> +	ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
> +					nr_pages << PAGE_SHIFT, GFP_KERNEL);
> +	if (ret)
> +		goto out;
> +	return 0;
> +out:
> +	kfree(bo->pages);
> +	bo->pages = NULL;
> +	return -ENOMEM;
> +}
> +
> +void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo)
> +{
> +	sg_free_table(bo->pages);
> +	kfree(bo->pages);
> +	bo->pages = NULL;
> +}
> +
> +int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait)
> +{
> +	int r;
> +
> +	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
> +	if (unlikely(r != 0))
> +		return r;
> +	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
> +	ttm_bo_unreserve(&bo->tbo);
> +	return r;
> +}
> +
> diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
> new file mode 100644
> index 0000000..a6f22e0
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
> @@ -0,0 +1,451 @@
> +/*
> + * Copyright 2013 Red Hat Inc.

It's 2015, isn't it?

> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice shall be included in
> + * all copies or substantial portions of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> + * OTHER DEALINGS IN THE SOFTWARE.
> + *
> + * Authors: Dave Airlie
> + *          Alon Levy
> + */
> +
> +#include <ttm/ttm_bo_api.h>
> +#include <ttm/ttm_bo_driver.h>
> +#include <ttm/ttm_placement.h>
> +#include <ttm/ttm_page_alloc.h>
> +#include <ttm/ttm_module.h>
> +#include <drm/drmP.h>
> +#include <drm/drm.h>
> +#include "virtgpu_drv.h"
> +
> +#include <linux/delay.h>
> +
> +#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
> +
> +static struct
> +virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
> +{
> +	struct virtio_gpu_mman *mman;
> +	struct virtio_gpu_device *vgdev;
> +
> +	mman = container_of(bdev, struct virtio_gpu_mman, bdev);
> +	vgdev = container_of(mman, struct virtio_gpu_device, mman);
> +	return vgdev;
> +}
> +
> +static int virtio_gpu_ttm_mem_global_init(struct drm_global_reference *ref)
> +{
> +	return ttm_mem_global_init(ref->object);
> +}
> +
> +static void virtio_gpu_ttm_mem_global_release(struct drm_global_reference *ref)
> +{
> +	ttm_mem_global_release(ref->object);
> +}
> +
> +static int virtio_gpu_ttm_global_init(struct virtio_gpu_device *vgdev)
> +{
> +	struct drm_global_reference *global_ref;
> +	int r;
> +
> +	vgdev->mman.mem_global_referenced = false;
> +	global_ref = &vgdev->mman.mem_global_ref;
> +	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
> +	global_ref->size = sizeof(struct ttm_mem_global);
> +	global_ref->init = &virtio_gpu_ttm_mem_global_init;
> +	global_ref->release = &virtio_gpu_ttm_mem_global_release;
> +
> +	r = drm_global_item_ref(global_ref);
> +	if (r != 0) {
> +		DRM_ERROR("Failed setting up TTM memory accounting "
> +			  "subsystem.\n");
> +		return r;
> +	}
> +
> +	vgdev->mman.bo_global_ref.mem_glob =
> +		vgdev->mman.mem_global_ref.object;
> +	global_ref = &vgdev->mman.bo_global_ref.ref;
> +	global_ref->global_type = DRM_GLOBAL_TTM_BO;
> +	global_ref->size = sizeof(struct ttm_bo_global);
> +	global_ref->init = &ttm_bo_global_init;
> +	global_ref->release = &ttm_bo_global_release;
> +	r = drm_global_item_ref(global_ref);
> +	if (r != 0) {
> +		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
> +		drm_global_item_unref(&vgdev->mman.mem_global_ref);
> +		return r;
> +	}
> +
> +	vgdev->mman.mem_global_referenced = true;
> +	return 0;
> +}
> +
> +static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev)
> +{
> +	if (vgdev->mman.mem_global_referenced) {
> +		drm_global_item_unref(&vgdev->mman.bo_global_ref.ref);
> +		drm_global_item_unref(&vgdev->mman.mem_global_ref);
> +		vgdev->mman.mem_global_referenced = false;
> +	}
> +}
> +
> +static struct vm_operations_struct virtio_gpu_ttm_vm_ops;
> +static const struct vm_operations_struct *ttm_vm_ops;

What do these globals do? Generally it's best to avoid
globals.

> +
> +static int virtio_gpu_ttm_fault(struct vm_area_struct *vma,
> +				struct vm_fault *vmf)
> +{
> +	struct ttm_buffer_object *bo;
> +	struct virtio_gpu_device *vgdev;
> +	int r;
> +
> +	bo = (struct ttm_buffer_object *)vma->vm_private_data;
> +	if (bo == NULL)
> +		return VM_FAULT_NOPAGE;
> +	vgdev = virtio_gpu_get_vgdev(bo->bdev);
> +	r = ttm_vm_ops->fault(vma, vmf);
> +	return r;
> +}
> +
> +int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
> +{
> +	struct drm_file *file_priv;
> +	struct virtio_gpu_device *vgdev;
> +	int r;
> +
> +	file_priv = filp->private_data;
> +	vgdev = file_priv->minor->dev->dev_private;
> +	if (vgdev == NULL) {
> +		DRM_ERROR(
> +		 "filp->private_data->minor->dev->dev_private == NULL\n");
> +		return -EINVAL;
> +	}
> +	r = ttm_bo_mmap(filp, vma, &vgdev->mman.bdev);
> +	if (unlikely(r != 0))
> +		return r;
> +	if (unlikely(ttm_vm_ops == NULL)) {
> +		ttm_vm_ops = vma->vm_ops;
> +		virtio_gpu_ttm_vm_ops = *ttm_vm_ops;
> +		virtio_gpu_ttm_vm_ops.fault = &virtio_gpu_ttm_fault;

These globals are accessed without any locks.
Seems racy.
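
If the lazy setup has to stay, at least serialize it (sketch,
assuming a file-scope mutex; the mmap path is process context):

	static DEFINE_MUTEX(virtio_gpu_vm_ops_lock);
	...
	mutex_lock(&virtio_gpu_vm_ops_lock);
	if (ttm_vm_ops == NULL) {
		ttm_vm_ops = vma->vm_ops;
		virtio_gpu_ttm_vm_ops = *ttm_vm_ops;
		virtio_gpu_ttm_vm_ops.fault = &virtio_gpu_ttm_fault;
	}
	mutex_unlock(&virtio_gpu_vm_ops_lock);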

> +	}
> +	vma->vm_ops = &virtio_gpu_ttm_vm_ops;
> +	return 0;
> +}
> +
> +static int virtio_gpu_invalidate_caches(struct ttm_bo_device *bdev,
> +					uint32_t flags)
> +{
> +	return 0;
> +}
> +
> +static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
> +			       struct ttm_buffer_object *bo,
> +			       const struct ttm_place *place,
> +			       struct ttm_mem_reg *mem)
> +{
> +	mem->mm_node = (void *)1;
> +	return 0;
> +}
> +
> +static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
> +				struct ttm_mem_reg *mem)
> +{
> +	mem->mm_node = (void *)NULL;
> +	return;
> +}
> +
> +static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
> +			   unsigned long p_size)
> +{
> +	return 0;
> +}
> +
> +static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
> +{
> +	return 0;
> +}
> +
> +static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
> +			     const char *prefix)
> +{
> +}
> +
> +static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = {
> +	ttm_bo_man_init,
> +	ttm_bo_man_takedown,
> +	ttm_bo_man_get_node,
> +	ttm_bo_man_put_node,
> +	ttm_bo_man_debug
> +};
> +
> +static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
> +				    struct ttm_mem_type_manager *man)
> +{
> +	struct virtio_gpu_device *vgdev;
> +
> +	vgdev = virtio_gpu_get_vgdev(bdev);
> +
> +	switch (type) {
> +	case TTM_PL_SYSTEM:
> +		/* System memory */
> +		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
> +		man->available_caching = TTM_PL_MASK_CACHING;
> +		man->default_caching = TTM_PL_FLAG_CACHED;
> +		break;
> +	case TTM_PL_TT:
> +		man->func = &virtio_gpu_bo_manager_func;
> +		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
> +		man->available_caching = TTM_PL_MASK_CACHING;
> +		man->default_caching = TTM_PL_FLAG_CACHED;
> +		break;
> +	default:
> +		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
> +		return -EINVAL;
> +	}
> +	return 0;
> +}
> +
> +static void virtio_gpu_evict_flags(struct ttm_buffer_object *bo,
> +				struct ttm_placement *placement)
> +{
> +	static struct ttm_place placements = {
> +		.fpfn  = 0,
> +		.lpfn  = 0,
> +		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM,
> +	};
> +
> +	placement->placement = &placements;
> +	placement->busy_placement = &placements;
> +	placement->num_placement = 1;
> +	placement->num_busy_placement = 1;
> +	return;
> +}
> +
> +static int virtio_gpu_verify_access(struct ttm_buffer_object *bo,
> +				    struct file *filp)
> +{
> +	return 0;
> +}
> +
> +static int virtio_gpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
> +					 struct ttm_mem_reg *mem)
> +{
> +	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
> +
> +	mem->bus.addr = NULL;
> +	mem->bus.offset = 0;
> +	mem->bus.size = mem->num_pages << PAGE_SHIFT;
> +	mem->bus.base = 0;
> +	mem->bus.is_iomem = false;
> +	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
> +		return -EINVAL;
> +	switch (mem->mem_type) {
> +	case TTM_PL_SYSTEM:
> +	case TTM_PL_TT:
> +		/* system memory */
> +		return 0;
> +	default:
> +		return -EINVAL;
> +	}
> +	return 0;
> +}
> +
> +static void virtio_gpu_ttm_io_mem_free(struct ttm_bo_device *bdev,
> +				       struct ttm_mem_reg *mem)
> +{
> +}
> +
> +/*
> + * TTM backend functions.
> + */
> +struct virtio_gpu_ttm_tt {
> +	struct ttm_dma_tt		ttm;
> +	struct virtio_gpu_device		*vgdev;
> +	u64				offset;
> +};
> +
> +static int virtio_gpu_ttm_backend_bind(struct ttm_tt *ttm,
> +				       struct ttm_mem_reg *bo_mem)
> +{
> +	struct virtio_gpu_ttm_tt *gtt = (void *)ttm;
> +
> +	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
> +	if (!ttm->num_pages) {
> +		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
> +		     ttm->num_pages, bo_mem, ttm);
> +	}

Single line within {}, please drop the {}.

> +	/* Not implemented */

What isn't?

> +	return 0;
> +}
> +
> +static int virtio_gpu_ttm_backend_unbind(struct ttm_tt *ttm)
> +{
> +	/* Not implemented */
> +	return 0;

The comment does not seem helpful.
Does 0 mean failure here?

> +}
> +
> +static void virtio_gpu_ttm_backend_destroy(struct ttm_tt *ttm)
> +{
> +	struct virtio_gpu_ttm_tt *gtt = (void *)ttm;
> +
> +	ttm_dma_tt_fini(&gtt->ttm);
> +	kfree(gtt);
> +}
> +
> +static struct ttm_backend_func virtio_gpu_backend_func = {
> +	.bind = &virtio_gpu_ttm_backend_bind,
> +	.unbind = &virtio_gpu_ttm_backend_unbind,
> +	.destroy = &virtio_gpu_ttm_backend_destroy,
> +};
> +
> +static int virtio_gpu_ttm_tt_populate(struct ttm_tt *ttm)
> +{
> +	if (ttm->state != tt_unpopulated)
> +		return 0;
> +
> +	return ttm_pool_populate(ttm);
> +}
> +
> +static void virtio_gpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
> +{
> +	ttm_pool_unpopulate(ttm);
> +}
> +
> +static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_bo_device *bdev,
> +					       unsigned long size,
> +					       uint32_t page_flags,
> +					       struct page *dummy_read_page)
> +{
> +	struct virtio_gpu_device *vgdev;
> +	struct virtio_gpu_ttm_tt *gtt;
> +
> +	vgdev = virtio_gpu_get_vgdev(bdev);
> +	gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
> +	if (gtt == NULL)
> +		return NULL;
> +	gtt->ttm.ttm.func = &virtio_gpu_backend_func;
> +	gtt->vgdev = vgdev;
> +	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags,
> +			    dummy_read_page)) {
> +		kfree(gtt);
> +		return NULL;
> +	}
> +	return &gtt->ttm.ttm;
> +}
> +
> +static void virtio_gpu_move_null(struct ttm_buffer_object *bo,
> +				 struct ttm_mem_reg *new_mem)
> +{
> +	struct ttm_mem_reg *old_mem = &bo->mem;
> +
> +	BUG_ON(old_mem->mm_node != NULL);
> +	*old_mem = *new_mem;
> +	new_mem->mm_node = NULL;
> +}
> +
> +static int virtio_gpu_bo_move(struct ttm_buffer_object *bo,
> +			      bool evict, bool interruptible,
> +			      bool no_wait_gpu,
> +			      struct ttm_mem_reg *new_mem)
> +{
> +	virtio_gpu_move_null(bo, new_mem);
> +	return 0;
> +}
> +
> +static void virtio_gpu_bo_move_notify(struct ttm_buffer_object *tbo,
> +				      struct ttm_mem_reg *new_mem)
> +{
> +	struct virtio_gpu_object *bo;
> +	struct virtio_gpu_device *vgdev;
> +
> +	bo = container_of(tbo, struct virtio_gpu_object, tbo);
> +	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
> +
> +	if (!new_mem || (new_mem->placement & TTM_PL_FLAG_SYSTEM)) {
> +		if (bo->hw_res_handle)
> +			virtio_gpu_cmd_resource_inval_backing(vgdev,
> +							   bo->hw_res_handle);
> +
> +	} else if (new_mem->placement & TTM_PL_FLAG_TT) {
> +		if (bo->hw_res_handle) {
> +			virtio_gpu_object_attach(vgdev, bo, bo->hw_res_handle,
> +					      NULL);
> +		}
> +	}
> +
> +	return;

return here is useless.

> +}
> +
> +static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo)
> +{
> +	struct virtio_gpu_object *bo;
> +	struct virtio_gpu_device *vgdev;
> +
> +	bo = container_of(tbo, struct virtio_gpu_object, tbo);
> +	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
> +
> +	if (bo->pages)
> +		virtio_gpu_object_free_sg_table(bo);
> +}
> +
> +static struct ttm_bo_driver virtio_gpu_bo_driver = {
> +	.ttm_tt_create = &virtio_gpu_ttm_tt_create,
> +	.ttm_tt_populate = &virtio_gpu_ttm_tt_populate,
> +	.ttm_tt_unpopulate = &virtio_gpu_ttm_tt_unpopulate,
> +	.invalidate_caches = &virtio_gpu_invalidate_caches,
> +	.init_mem_type = &virtio_gpu_init_mem_type,
> +	.evict_flags = &virtio_gpu_evict_flags,
> +	.move = &virtio_gpu_bo_move,
> +	.verify_access = &virtio_gpu_verify_access,
> +	.io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
> +	.io_mem_free = &virtio_gpu_ttm_io_mem_free,
> +	.move_notify = &virtio_gpu_bo_move_notify,
> +	.swap_notify = &virtio_gpu_bo_swap_notify,
> +};
> +
> +int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
> +{
> +	int r;
> +
> +	r = virtio_gpu_ttm_global_init(vgdev);
> +	if (r)
> +		return r;
> +	/* No others user of address space so set it to 0 */

No other users?

> +	r = ttm_bo_device_init(&vgdev->mman.bdev,
> +			       vgdev->mman.bo_global_ref.ref.object,
> +			       &virtio_gpu_bo_driver,
> +			       vgdev->ddev->anon_inode->i_mapping,
> +			       DRM_FILE_PAGE_OFFSET, 0);
> +	if (r) {
> +		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
> +		return r;
> +	}
> +
> +	r = ttm_bo_init_mm(&vgdev->mman.bdev, TTM_PL_TT, 0);
> +	if (r) {
> +		DRM_ERROR("Failed initializing GTT heap.\n");

Should this revert the effects of ttm_bo_device_init and
virtio_gpu_ttm_global_init?
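
E.g. (sketch, in the style of the error paths elsewhere in the
driver):

	r = ttm_bo_init_mm(&vgdev->mman.bdev, TTM_PL_TT, 0);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		goto err_mm_init;
	}
	return 0;

err_mm_init:
	ttm_bo_device_release(&vgdev->mman.bdev);
	virtio_gpu_ttm_global_fini(vgdev);
	return r;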

> +		return r;
> +	}
> +	return 0;
> +}
> +
> +void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev)
> +{
> +	ttm_bo_device_release(&vgdev->mman.bdev);
> +	virtio_gpu_ttm_global_fini(vgdev);
> +	DRM_INFO("virtio_gpu: ttm finalized\n");
> +}
> diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
> new file mode 100644
> index 0000000..a98cda8
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
> @@ -0,0 +1,540 @@
> +#include <drm/drmP.h>
> +#include "virtgpu_drv.h"
> +#include <linux/virtio.h>
> +#include <linux/virtio_config.h>
> +#include <linux/virtio_ring.h>
> +
> +
> +int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
> +{
> +	int handle;
> +
> +	idr_preload(GFP_KERNEL);
> +	spin_lock(&vgdev->resource_idr_lock);
> +	handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
> +	spin_unlock(&vgdev->resource_idr_lock);
> +	idr_preload_end();
> +	*resid = handle;
> +	return 0;
> +}
> +
> +void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
> +{
> +	spin_lock(&vgdev->resource_idr_lock);
> +	idr_remove(&vgdev->resource_idr, id);
> +	spin_unlock(&vgdev->resource_idr_lock);
> +}
> +
> +void virtio_gpu_ctrl_ack(struct virtqueue *vq)
> +{
> +	struct drm_device *dev = vq->vdev->priv;
> +	struct virtio_gpu_device *vgdev = dev->dev_private;
> +	schedule_work(&vgdev->ctrlq.dequeue_work);
> +}
> +
> +void virtio_gpu_cursor_ack(struct virtqueue *vq)
> +{
> +	struct drm_device *dev = vq->vdev->priv;
> +	struct virtio_gpu_device *vgdev = dev->dev_private;
> +	schedule_work(&vgdev->cursorq.dequeue_work);
> +}
> +
> +static struct virtio_gpu_vbuffer*
> +virtio_gpu_allocate_vbuf(struct virtio_gpu_device *vgdev,
> +			 int size, int resp_size,
> +			 virtio_gpu_resp_cb resp_cb)
> +{
> +	struct virtio_gpu_vbuffer *vbuf;
> +
> +	vbuf = kzalloc(sizeof(*vbuf) + size + resp_size, GFP_KERNEL);
> +	if (!vbuf)
> +		goto fail;
> +
> +	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
> +	vbuf->size = size;
> +
> +	vbuf->resp_cb = resp_cb;
> +	if (resp_size)
> +		vbuf->resp_buf = (void *)vbuf->buf + size;
> +	else
> +		vbuf->resp_buf = NULL;
> +	vbuf->resp_size = resp_size;
> +
> +	return vbuf;
> +fail:
> +	kfree(vbuf);
> +	return ERR_PTR(-ENOMEM);
> +}
> +
> +static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
> +				  struct virtio_gpu_vbuffer **vbuffer_p,
> +				  int size)
> +{
> +	struct virtio_gpu_vbuffer *vbuf;
> +
> +	vbuf = virtio_gpu_allocate_vbuf(vgdev, size,
> +				     sizeof(struct virtio_gpu_ctrl_hdr), NULL);
> +	if (IS_ERR(vbuf)) {
> +		*vbuffer_p = NULL;
> +		return ERR_CAST(vbuf);

Nice, but then all callers fail to validate the returned values.
What was the point then?
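
I.e. every caller would need something like (sketch):

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	if (IS_ERR(cmd_p))
		return PTR_ERR(cmd_p);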

> +	}
> +	*vbuffer_p = vbuf;
> +	return vbuf->buf;
> +}
> +
> +static struct virtio_gpu_update_cursor*
> +virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
> +			struct virtio_gpu_vbuffer **vbuffer_p)
> +{
> +	struct virtio_gpu_vbuffer *vbuf;
> +
> +	vbuf = virtio_gpu_allocate_vbuf
> +		(vgdev, sizeof(struct virtio_gpu_update_cursor), 0, NULL);
> +	if (IS_ERR(vbuf)) {
> +		*vbuffer_p = NULL;
> +		return ERR_CAST(vbuf);
> +	}
> +	*vbuffer_p = vbuf;
> +	return (struct virtio_gpu_update_cursor *)vbuf->buf;
> +}
> +
> +static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
> +				       virtio_gpu_resp_cb cb,
> +				       struct virtio_gpu_vbuffer **vbuffer_p,
> +				       int cmd_size, int resp_size)
> +{
> +	struct virtio_gpu_vbuffer *vbuf;
> +
> +	vbuf = virtio_gpu_allocate_vbuf(vgdev, cmd_size, resp_size, cb);
> +	if (IS_ERR(vbuf)) {
> +		*vbuffer_p = NULL;
> +		return ERR_CAST(vbuf);

Same issue as virtio_gpu_alloc_cmd.

> +	}
> +	*vbuffer_p = vbuf;
> +	return (struct virtio_gpu_command *)vbuf->buf;
> +}
> +
> +static void free_vbuf(struct virtio_gpu_device *vgdev,
> +		      struct virtio_gpu_vbuffer *vbuf)
> +{
> +	kfree(vbuf->data_buf);
> +	kfree(vbuf);
> +}
> +
> +static int reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
> +{
> +	struct virtio_gpu_vbuffer *vbuf;
> +	unsigned int len;
> +	int freed = 0;
> +	while ((vbuf = virtqueue_get_buf(vq, &len))) {
> +		list_add_tail(&vbuf->destroy_list, reclaim_list);
> +		freed++;
> +	}
> +	return freed;
> +}
> +
> +void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
> +{
> +	struct virtio_gpu_device *vgdev =
> +		container_of(work, struct virtio_gpu_device,
> +			     ctrlq.dequeue_work);
> +	int ret;
> +	struct list_head reclaim_list;
> +	struct virtio_gpu_vbuffer *entry, *tmp;
> +	struct virtio_gpu_ctrl_hdr *resp;
> +	u64 fence_id = 0;
> +
> +	INIT_LIST_HEAD(&reclaim_list);
> +	spin_lock(&vgdev->ctrlq.qlock);
> +	do {
> +		virtqueue_disable_cb(vgdev->ctrlq.vq);
> +		ret = reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
> +		if (ret == 0)
> +			DRM_DEBUG("cleaned 0 buffers wierd\n");

Is the "wierd" spelling intentional? Presumably "weird" is meant.

> +
> +	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
> +	spin_unlock(&vgdev->ctrlq.qlock);
> +
> +	list_for_each_entry_safe(entry, tmp, &reclaim_list, destroy_list) {
> +		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
> +		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
> +			DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
> +		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
> +			u64 f = le64_to_cpu(resp->fence_id);
> +
> +			if (fence_id > f) {
> +				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
> +					  __func__, fence_id, f);
> +			} else {
> +				fence_id = f;
> +			}
> +		}
> +		if (entry->resp_cb)
> +			entry->resp_cb(vgdev, entry);
> +
> +		list_del(&entry->destroy_list);
> +		free_vbuf(vgdev, entry);
> +	}
> +	wake_up(&vgdev->ctrlq.ack_queue);
> +
> +	if (fence_id) {
> +		virtio_gpu_fence_event_process(vgdev, fence_id);
> +	}

Don't put {} around single-line blocks.

> +}
> +
> +void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
> +{
> +	struct virtio_gpu_device *vgdev =
> +		container_of(work, struct virtio_gpu_device,
> +			     cursorq.dequeue_work);
> +	struct virtqueue *vq = vgdev->cursorq.vq;
> +	struct list_head reclaim_list;
> +	struct virtio_gpu_vbuffer *entry, *tmp;
> +	unsigned int len;
> +	int ret;
> +
> +	INIT_LIST_HEAD(&reclaim_list);
> +	spin_lock(&vgdev->cursorq.qlock);
> +	do {
> +		virtqueue_disable_cb(vgdev->cursorq.vq);
> +		ret = reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
> +		if (ret == 0)
> +			DRM_DEBUG("cleaned 0 buffers wierd\n");
> +		while (virtqueue_get_buf(vq, &len))
> +			/* nothing */;
> +	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
> +	spin_unlock(&vgdev->cursorq.qlock);
> +
> +	list_for_each_entry_safe(entry, tmp, &reclaim_list, destroy_list) {
> +		list_del(&entry->destroy_list);
> +		free_vbuf(vgdev, entry);
> +	}
> +	wake_up(&vgdev->cursorq.ack_queue);
> +}
> +
> +static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
> +					struct virtio_gpu_vbuffer *vbuf)
> +{
> +	struct virtqueue *vq = vgdev->ctrlq.vq;
> +	struct scatterlist *sgs[3], vcmd, vout, vresp;
> +	int outcnt = 0, incnt = 0;
> +	int ret;
> +
> +	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
> +	sgs[outcnt+incnt] = &vcmd;
> +	outcnt++;
> +
> +	if (vbuf->data_buf) {
> +		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
> +		sgs[outcnt+incnt] = &vout;

Need space around + here and elsewhere.

> +		outcnt++;
> +	}
> +
> +	if (vbuf->resp_buf) {
> +		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
> +		sgs[outcnt+incnt] = &vresp;
> +		incnt++;
> +	}
> +
> +	spin_lock(&vgdev->ctrlq.qlock);
> +retry:
> +	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
> +	if (ret == -ENOSPC) {
> +		spin_unlock(&vgdev->ctrlq.qlock);
> +		wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
> +		spin_lock(&vgdev->ctrlq.qlock);
> +		goto retry;
> +	} else {
> +		virtqueue_kick(vq);
> +	}

This can also fail on OOM, since you are using GFP_ATOMIC.
You mustn't fail in that case; handle it gracefully by
retrying later.

Maybe change qlock to a mutex, and GFP_ATOMIC to GFP_KERNEL.
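
I.e. something like this (sketch, assuming qlock becomes a struct
mutex so the allocation may sleep):

	mutex_lock(&vgdev->ctrlq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_KERNEL);
	if (ret == -ENOSPC) {
		mutex_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
		mutex_lock(&vgdev->ctrlq.qlock);
		goto retry;
	}
	virtqueue_kick(vq);
	mutex_unlock(&vgdev->ctrlq.qlock);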

> +	spin_unlock(&vgdev->ctrlq.qlock);
> +
> +	if (!ret)
> +		ret = vq->num_free;
> +	return ret;
> +}
> +
> +static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
> +				   struct virtio_gpu_vbuffer *vbuf)
> +{
> +	struct virtqueue *vq = vgdev->cursorq.vq;
> +	struct scatterlist *sgs[1], ccmd;
> +	int ret;
> +	int outcnt;
> +
> +	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
> +	sgs[0] = &ccmd;
> +	outcnt = 1;
> +
> +	spin_lock(&vgdev->cursorq.qlock);
> +retry:
> +	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
> +	if (ret == -ENOSPC) {
> +		spin_unlock(&vgdev->cursorq.qlock);
> +		wait_event(vgdev->cursorq.ack_queue, vq->num_free);

Checking num_free outside the lock seems risky.
For example, you might see a stale num_free
value and block forever.
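
Maybe evaluate the wait condition under the lock, e.g. with a small
helper (sketch, helper name made up):

static bool virtio_gpu_vq_has_space(struct virtio_gpu_queue *q)
{
	bool space;

	spin_lock(&q->qlock);
	space = q->vq->num_free > 0;
	spin_unlock(&q->qlock);
	return space;
}

	...
	wait_event(vgdev->cursorq.ack_queue,
		   virtio_gpu_vq_has_space(&vgdev->cursorq));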

> +		spin_lock(&vgdev->cursorq.qlock);
> +		goto retry;
> +	} else {
> +		virtqueue_kick(vq);
> +	}
> +
> +	spin_unlock(&vgdev->cursorq.qlock);
> +
> +	if (!ret)
> +		ret = vq->num_free;
> +	return ret;
> +}
> +
> +/* just create gem objects for userspace and long lived objects,
> +   just use dma_alloced pages for the queue objects? */
> +
> +/* create a basic resource */
> +int virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
> +				   uint32_t resource_id,
> +				   uint32_t format,
> +				   uint32_t width,
> +				   uint32_t height)
> +{
> +	struct virtio_gpu_resource_create_2d *cmd_p;
> +	struct virtio_gpu_vbuffer *vbuf;
> +
> +	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
> +	memset(cmd_p, 0, sizeof(*cmd_p));
> +
> +	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
> +	cmd_p->resource_id = cpu_to_le32(resource_id);
> +	cmd_p->format = cpu_to_le32(format);
> +	cmd_p->width = cpu_to_le32(width);
> +	cmd_p->height = cpu_to_le32(height);
> +
> +	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
> +
> +	return 0;
> +}
> +
> +int virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
> +				  uint32_t resource_id)
> +{
> +	struct virtio_gpu_resource_unref *cmd_p;
> +	struct virtio_gpu_vbuffer *vbuf;
> +
> +	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
> +	memset(cmd_p, 0, sizeof(*cmd_p));
> +
> +	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
> +	cmd_p->resource_id = cpu_to_le32(resource_id);
> +
> +	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
> +	return 0;
> +}
> +
> +int virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
> +					  uint32_t resource_id)
> +{
> +	struct virtio_gpu_resource_detach_backing *cmd_p;
> +	struct virtio_gpu_vbuffer *vbuf;
> +
> +	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
> +	memset(cmd_p, 0, sizeof(*cmd_p));
> +
> +	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
> +	cmd_p->resource_id = cpu_to_le32(resource_id);
> +
> +	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
> +
> +	return 0;
> +}
> +
> +int virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
> +			       uint32_t scanout_id, uint32_t resource_id,
> +			       uint32_t width, uint32_t height,
> +			       uint32_t x, uint32_t y)
> +{
> +	struct virtio_gpu_set_scanout *cmd_p;
> +	struct virtio_gpu_vbuffer *vbuf;
> +
> +	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
> +	memset(cmd_p, 0, sizeof(*cmd_p));
> +
> +	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
> +	cmd_p->resource_id = cpu_to_le32(resource_id);
> +	cmd_p->scanout_id = cpu_to_le32(scanout_id);
> +	cmd_p->r.width = cpu_to_le32(width);
> +	cmd_p->r.height = cpu_to_le32(height);
> +	cmd_p->r.x = cpu_to_le32(x);
> +	cmd_p->r.y = cpu_to_le32(y);
> +
> +	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
> +	return 0;
> +}
> +
> +int virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
> +				  uint32_t resource_id,
> +				  uint32_t x, uint32_t y,
> +				  uint32_t width, uint32_t height)
> +{
> +	struct virtio_gpu_resource_flush *cmd_p;
> +	struct virtio_gpu_vbuffer *vbuf;
> +
> +	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
> +	memset(cmd_p, 0, sizeof(*cmd_p));
> +
> +	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
> +	cmd_p->resource_id = cpu_to_le32(resource_id);
> +	cmd_p->r.width = cpu_to_le32(width);
> +	cmd_p->r.height = cpu_to_le32(height);
> +	cmd_p->r.x = cpu_to_le32(x);
> +	cmd_p->r.y = cpu_to_le32(y);
> +
> +	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
> +
> +	return 0;
> +}
> +
> +int virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
> +				       uint32_t resource_id, uint64_t offset,
> +				       __le32 width, __le32 height,
> +				       __le32 x, __le32 y,
> +				       struct virtio_gpu_fence **fence)
> +{
> +	struct virtio_gpu_transfer_to_host_2d *cmd_p;
> +	struct virtio_gpu_vbuffer *vbuf;
> +
> +	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
> +	memset(cmd_p, 0, sizeof(*cmd_p));
> +
> +	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
> +	cmd_p->resource_id = cpu_to_le32(resource_id);
> +	cmd_p->offset = cpu_to_le64(offset);
> +	cmd_p->r.width = width;
> +	cmd_p->r.height = height;
> +	cmd_p->r.x = x;
> +	cmd_p->r.y = y;
> +
> +	if (fence)
> +		virtio_gpu_fence_emit(vgdev, &cmd_p->hdr, fence);
> +	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
> +
> +	return 0;
> +}
> +
> +static int
> +virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
> +				       uint32_t resource_id,
> +				       struct virtio_gpu_mem_entry *ents,
> +				       uint32_t nents,
> +				       struct virtio_gpu_fence **fence)
> +{
> +	struct virtio_gpu_resource_attach_backing *cmd_p;
> +	struct virtio_gpu_vbuffer *vbuf;
> +
> +	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
> +	memset(cmd_p, 0, sizeof(*cmd_p));
> +
> +	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
> +	cmd_p->resource_id = cpu_to_le32(resource_id);
> +	cmd_p->nr_entries = cpu_to_le32(nents);
> +
> +	vbuf->data_buf = ents;
> +	vbuf->data_size = sizeof(*ents) * nents;
> +
> +	if (fence)
> +		virtio_gpu_fence_emit(vgdev, &cmd_p->hdr, fence);
> +	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
> +
> +	return 0;
> +}
> +
> +static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
> +					       struct virtio_gpu_vbuffer *vbuf)
> +{
> +	struct virtio_gpu_resp_display_info *resp =
> +		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
> +	int i;
> +
> +	spin_lock(&vgdev->display_info_lock);
> +	for (i = 0; i < vgdev->num_scanouts; i++) {
> +		vgdev->outputs[i].info = resp->pmodes[i];
> +		if (resp->pmodes[i].enabled) {
> +			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
> +				  le32_to_cpu(resp->pmodes[i].r.width),
> +				  le32_to_cpu(resp->pmodes[i].r.height),
> +				  le32_to_cpu(resp->pmodes[i].r.x),
> +				  le32_to_cpu(resp->pmodes[i].r.y));
> +		} else {
> +			DRM_DEBUG("output %d: disabled", i);
> +		}
> +	}
> +
> +	spin_unlock(&vgdev->display_info_lock);
> +	wake_up(&vgdev->resp_wq);
> +
> +	if (!drm_helper_hpd_irq_event(vgdev->ddev)) {
> +		drm_kms_helper_hotplug_event(vgdev->ddev);
> +	}

Don't put {} around single-statements.

> +}
> +
> +int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
> +{
> +	struct virtio_gpu_ctrl_hdr *cmd_p;
> +	struct virtio_gpu_vbuffer *vbuf;
> +
> +	cmd_p = virtio_gpu_alloc_cmd_resp
> +		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
> +		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info));

Please split it like this:
+	cmd_p = virtio_gpu_alloc_cmd_resp(vgdev,
+					  &virtio_gpu_cmd_get_display_info_cb,
+					  &vbuf,
+					  sizeof(*cmd_p),
+					  sizeof(struct virtio_gpu_resp_display_info));

i.e. keep the ( on the same line as the function name.


> +	memset(cmd_p, 0, sizeof(*cmd_p));
> +
> +	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
> +	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
> +	return 0;
> +}

Make this function void?

> +
> +int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
> +			     struct virtio_gpu_object *obj,
> +			     uint32_t resource_id,
> +			     struct virtio_gpu_fence **fence)
> +{
> +	struct virtio_gpu_mem_entry *ents;
> +	struct scatterlist *sg;
> +	int si;
> +
> +	if (!obj->pages) {
> +		int ret;
> +		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
> +		if (ret)
> +			return ret;
> +	}
> +
> +	/* gets freed when the ring has consumed it */
> +	ents = kmalloc_array(obj->pages->nents,
> +			     sizeof(struct virtio_gpu_mem_entry),
> +			     GFP_KERNEL);
> +	if (!ents) {
> +		DRM_ERROR("failed to allocate ent list\n");
> +		return -ENOMEM;
> +	}
> +
> +	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
> +		ents[si].addr = cpu_to_le64(sg_phys(sg));
> +		ents[si].length = cpu_to_le32(sg->length);
> +		ents[si].padding = 0;
> +	}
> +
> +	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
> +					       ents, obj->pages->nents,
> +					       fence);
> +	obj->hw_res_handle = resource_id;
> +	return 0;
> +}
> +
> +void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
> +			    struct virtio_gpu_output *output)
> +{
> +	struct virtio_gpu_vbuffer *vbuf;
> +	struct virtio_gpu_update_cursor *cur_p;
> +
> +	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
> +	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
> +	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
> +	virtio_gpu_queue_cursor(vgdev, vbuf);
> +}
> diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
> index e894eb2..a3167fa 100644
> --- a/drivers/virtio/virtio_pci_common.c
> +++ b/drivers/virtio/virtio_pci_common.c
> @@ -510,7 +510,7 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
>  		goto err_enable_device;
>  
>  	rc = pci_request_regions(pci_dev, "virtio-pci");
> -	if (rc)
> +	if (rc && ((pci_dev->class >> 8) != PCI_CLASS_DISPLAY_VGA))
>  		goto err_request_regions;
>  
>  	if (force_legacy) {
> diff --git a/include/drm/drmP.h b/include/drm/drmP.h
> index e928625..a1067c4 100644
> --- a/include/drm/drmP.h
> +++ b/include/drm/drmP.h
> @@ -799,6 +799,7 @@ struct drm_device {
>  #endif
>  
>  	struct platform_device *platformdev; /**< Platform device struture */
> +	struct virtio_device *virtdev;
>  
>  	struct drm_sg_mem *sg;	/**< Scatter gather memory */
>  	unsigned int num_crtcs;                  /**< Number of CRTCs on this device */
> diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
> index 68ceb97..9707e5d 100644
> --- a/include/uapi/linux/Kbuild
> +++ b/include/uapi/linux/Kbuild
> @@ -429,6 +429,7 @@ header-y += virtio_balloon.h
>  header-y += virtio_blk.h
>  header-y += virtio_config.h
>  header-y += virtio_console.h
> +header-y += virtio_gpu.h
>  header-y += virtio_ids.h
>  header-y += virtio_net.h
>  header-y += virtio_pci.h
> diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
> new file mode 100644
> index 0000000..a1bda52
> --- /dev/null
> +++ b/include/uapi/linux/virtio_gpu.h
> @@ -0,0 +1,203 @@
> +/*
> + * Virtio GPU Device
> + *
> + * Copyright Red Hat, Inc. 2013-2014
> + *
> + * Authors:
> + *     Dave Airlie <airlied@redhat.com>
> + *     Gerd Hoffmann <kraxel@redhat.com>
> + *
> + * This header is BSD licensed so anyone can use the definitions
> + * to implement compatible drivers/servers:
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions
> + * are met:
> + * 1. Redistributions of source code must retain the above copyright
> + *    notice, this list of conditions and the following disclaimer.
> + * 2. Redistributions in binary form must reproduce the above copyright
> + *    notice, this list of conditions and the following disclaimer in the
> + *    documentation and/or other materials provided with the distribution.
> + * 3. Neither the name of IBM nor the names of its contributors
> + *    may be used to endorse or promote products derived from this software
> + *    without specific prior written permission.
> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
> + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR
> + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
> + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
> + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
> + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
> + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
> + * SUCH DAMAGE.
> + */
> +
> +#ifndef VIRTIO_GPU_HW_H
> +#define VIRTIO_GPU_HW_H
> +
> +enum virtio_gpu_ctrl_type {
> +	VIRTIO_GPU_UNDEFINED = 0,
> +
> +	/* 2d commands */
> +	VIRTIO_GPU_CMD_GET_DISPLAY_INFO = 0x0100,
> +	VIRTIO_GPU_CMD_RESOURCE_CREATE_2D,
> +	VIRTIO_GPU_CMD_RESOURCE_UNREF,
> +	VIRTIO_GPU_CMD_SET_SCANOUT,
> +	VIRTIO_GPU_CMD_RESOURCE_FLUSH,
> +	VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D,
> +	VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING,
> +	VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING,
> +
> +	/* cursor commands */
> +	VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300,
> +	VIRTIO_GPU_CMD_MOVE_CURSOR,
> +
> +	/* success responses */
> +	VIRTIO_GPU_RESP_OK_NODATA = 0x1100,
> +	VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
> +
> +	/* error responses */
> +	VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
> +	VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY,
> +	VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID,
> +	VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID,
> +	VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID,
> +	VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER,
> +};
> +
> +#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
> +
> +struct virtio_gpu_ctrl_hdr {
> +	__le32 type;
> +	__le32 flags;
> +	__le64 fence_id;
> +	__le32 ctx_id;
> +	__le32 padding;
> +};
> +
> +/* data passed in the cursor vq */
> +
> +struct virtio_gpu_cursor_pos {
> +	__le32 scanout_id;
> +	__le32 x, y;

Prefer 
	__le32 x;
	__le32 y;

so struct size is obvious.

> +	__le32 padding;
> +};
> +
> +/* VIRTIO_GPU_CMD_UPDATE_CURSOR, VIRTIO_GPU_CMD_MOVE_CURSOR */
> +struct virtio_gpu_update_cursor {
> +	struct virtio_gpu_ctrl_hdr hdr;
> +	struct virtio_gpu_cursor_pos pos;  /* update & move */
> +	__le32 resource_id;           /* update only */
> +	__le32 hot_x;                 /* update only */
> +	__le32 hot_y;                 /* update only */
> +	__le32 padding;
> +};
> +
> +/* data passed in the control vq, 2d related */
> +

Does this refer to struct virtio_gpu_rect only, or to all
structs below?
> +struct virtio_gpu_rect {
> +	__le32 x, y;
> +	__le32 width;
> +	__le32 height;
> +};
> +
> +/* VIRTIO_GPU_CMD_RESOURCE_UNREF */
> +struct virtio_gpu_resource_unref {
> +	struct virtio_gpu_ctrl_hdr hdr;
> +	__le32 resource_id;
> +	__le32 padding;
> +};
> +
> +/* VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: create a 2d resource with a format */
> +struct virtio_gpu_resource_create_2d {
> +	struct virtio_gpu_ctrl_hdr hdr;
> +	__le32 resource_id;
> +	__le32 format;
> +	__le32 width;
> +	__le32 height;
> +};
> +
> +/* VIRTIO_GPU_CMD_SET_SCANOUT */
> +struct virtio_gpu_set_scanout {
> +	struct virtio_gpu_ctrl_hdr hdr;
> +	struct virtio_gpu_rect r;
> +	__le32 scanout_id;
> +	__le32 resource_id;
> +};
> +
> +/* VIRTIO_GPU_CMD_RESOURCE_FLUSH */
> +struct virtio_gpu_resource_flush {
> +	struct virtio_gpu_ctrl_hdr hdr;
> +	struct virtio_gpu_rect r;
> +	__le32 resource_id;
> +	__le32 padding;
> +};
> +
> +/* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: simple transfer to_host */
> +struct virtio_gpu_transfer_to_host_2d {
> +	struct virtio_gpu_ctrl_hdr hdr;
> +	struct virtio_gpu_rect r;
> +	__le64 offset;
> +	__le32 resource_id;
> +	__le32 padding;
> +};
> +
> +struct virtio_gpu_mem_entry {
> +	__le64 addr;
> +	__le32 length;
> +	__le32 padding;
> +};
> +
> +/* VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING */
> +struct virtio_gpu_resource_attach_backing {
> +	struct virtio_gpu_ctrl_hdr hdr;
> +	__le32 resource_id;
> +	__le32 nr_entries;
> +};
> +
> +/* VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING */
> +struct virtio_gpu_resource_detach_backing {
> +	struct virtio_gpu_ctrl_hdr hdr;
> +	__le32 resource_id;
> +	__le32 padding;
> +};
> +
> +/* VIRTIO_GPU_RESP_OK_DISPLAY_INFO */
> +#define VIRTIO_GPU_MAX_SCANOUTS 16
> +struct virtio_gpu_resp_display_info {
> +	struct virtio_gpu_ctrl_hdr hdr;
> +	struct virtio_gpu_display_one {
> +		struct virtio_gpu_rect r;
> +		__le32 enabled;
> +		__le32 flags;
> +	} pmodes[VIRTIO_GPU_MAX_SCANOUTS];
> +};
> +
> +#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)
> +
> +struct virtio_gpu_config {
> +	__u32 events_read;
> +	__u32 events_clear;
> +	__u32 num_scanouts;
> +	__u32 reserved;
> +};
> +
> +/* simple formats for fbcon/X use */
> +enum virtio_gpu_formats {
> +	VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM  = 1,
> +	VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM  = 2,
> +	VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM  = 3,
> +	VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM  = 4,
> +
> +	VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM  = 67,
> +	VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM  = 68,
> +
> +	VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM  = 121,
> +	VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM  = 134,
> +

Drop the empty line here.

> +};
> +
> +#endif
> diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
> index 284fc3a..14d77f7 100644
> --- a/include/uapi/linux/virtio_ids.h
> +++ b/include/uapi/linux/virtio_ids.h
> @@ -39,5 +39,5 @@
>  #define VIRTIO_ID_9P		9 /* 9p virtio console */
>  #define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */
>  #define VIRTIO_ID_CAIF	       12 /* Virtio caif */
> -
> +#define VIRTIO_ID_GPU          16

I think we like the empty line before #endif.

>  #endif /* _LINUX_VIRTIO_IDS_H */
> -- 
> 1.8.3.1
Paul Bolle March 24, 2015, 8:47 p.m. UTC | #4
Just a license nit.

On Tue, 2015-03-24 at 17:07 +0100, Gerd Hoffmann wrote:

> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
> @@ -0,0 +1,132 @@
> +/*
> + * 2011 Red Hat, Inc.
> + * All Rights Reserved.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the next
> + * paragraph) shall be included in all copies or substantial portions of the
> + * Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> + * OTHER DEALINGS IN THE SOFTWARE.

I wouldn't know (without further, well, research) which license this is.

> +MODULE_LICENSE("GPL");

But I'm pretty sure it's not GPL v2 or later. So I think the license
mentioned in the comment at the top of this file and the license "ident"
used in this macro do not match.
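
If the header really is meant to be MIT, then something like

	MODULE_LICENSE("Dual MIT/GPL");

might be a better fit (assuming MIT is the intended license, of
course).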


Paul Bolle
Daniel Stone March 24, 2015, 10:50 p.m. UTC | #5
Hi,

On 24 March 2015 at 16:07, Gerd Hoffmann <kraxel@redhat.com> wrote:
> +static int virtio_gpu_crtc_page_flip(struct drm_crtc *crtc,
> +                                    struct drm_framebuffer *fb,
> +                                    struct drm_pending_vblank_event *event,
> +                                    uint32_t flags)
> +{
> +       return -EINVAL;
> +}

I'm not going to lie, I was really hoping the 5th (?) GPU option for
Qemu would support pageflipping. Daniel's comment about conversion to
atomic is relevant, but: do you have a mechanism which allows you to
post updates (e.g. 'start displaying this buffer now please') that
allows you to get events back when they have actually been displayed?

Cheers,
Daniel
Dave Airlie March 25, 2015, midnight UTC | #6
On 25 March 2015 at 08:50, Daniel Stone <daniel@fooishbar.org> wrote:
> Hi,
>
> On 24 March 2015 at 16:07, Gerd Hoffmann <kraxel@redhat.com> wrote:
>> +static int virtio_gpu_crtc_page_flip(struct drm_crtc *crtc,
>> +                                    struct drm_framebuffer *fb,
>> +                                    struct drm_pending_vblank_event *event,
>> +                                    uint32_t flags)
>> +{
>> +       return -EINVAL;
>> +}
>
> I'm not going to lie, I was really hoping the 5th (?) GPU option for
> Qemu would support pageflipping. Daniel's comment about conversion to
> atomic is relevant, but: do you have a mechanism which allows you to
> post updates (e.g. 'start displaying this buffer now please') that
> allows you to get events back when they have actually been displayed?

Page flip is implemented in a later patch,

https://www.kraxel.org/cgit/linux/commit/?h=virtio-gpu&id=1e167e8e964f8e08100d315dd354cc0a4b090841

Since it's a long way from an actual display, finding out when
something is actually displayed is hard,
but signalling when we've posted it to the frontbuffer should be fine.

Dave.
Daniel Stone March 25, 2015, 5:47 a.m. UTC | #7
Hi,

On Wednesday, March 25, 2015, Dave Airlie <airlied@gmail.com> wrote:

> On 25 March 2015 at 08:50, Daniel Stone <daniel@fooishbar.org> wrote:
> > I'm not going to lie, I was really hoping the 5th (?) GPU option for
> > Qemu would support pageflipping. Daniel's comment about conversion to
> > atomic is relevant, but: do you have a mechanism which allows you to
> > post updates (e.g. 'start displaying this buffer now please') that
> > allows you to get events back when they have actually been displayed?
>
> Page flip is implemented in a later patch,
>
>
> https://www.kraxel.org/cgit/linux/commit/?h=virtio-gpu&id=1e167e8e964f8e08100d315dd354cc0a4b090841
>
> Since its a long way from an actual display, finding out when
> something is actually displayed is hard,
> but when we've posted it to the frontbuffer should be fine.
>

Oh nice. 100% exact timings aren't critical; it's more about having
something gated to 60fps that we can drive Weston's repaint loop
off, as that's (not unreasonably) driven by pageflip events.

Cheers,
Daniel
Gerd Hoffmann March 25, 2015, 2:52 p.m. UTC | #8
Hi,

> > diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
> > index e894eb2..a3167fa 100644
> > --- a/drivers/virtio/virtio_pci_common.c
> > +++ b/drivers/virtio/virtio_pci_common.c
> > @@ -510,7 +510,7 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
> >  		goto err_enable_device;
> >  
> >  	rc = pci_request_regions(pci_dev, "virtio-pci");
> > -	if (rc)
> > +	if (rc && ((pci_dev->class >> 8) != PCI_CLASS_DISPLAY_VGA))
> >  		goto err_request_regions;
> >  
> >  	if (force_legacy) {
> 
> This is probably what you described as "the only concern?

Ahem, no, forgot that one, but it is related.  With vesafb using and
registering the vga compat framebuffer bar, pci_request_regions will not
succeed.

vesafb will be unregistered later on (this is what I was referring to)
by the virtio-gpu driver.

> If we only need to request specific
> regions, I think we should do exactly that, requesting only parts of
> regions that are covered by the virtio capabilities.

That should work too.
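
Roughly along these lines, I guess (sketch only -- no dedup of BARs
that are referenced by more than one capability):

	int pos;
	u8 bar;

	for (pos = pci_find_capability(pci_dev, PCI_CAP_ID_VNDR);
	     pos > 0;
	     pos = pci_find_next_capability(pci_dev, pos, PCI_CAP_ID_VNDR)) {
		pci_read_config_byte(pci_dev,
				     pos + offsetof(struct virtio_pci_cap, bar),
				     &bar);
		rc = pci_request_region(pci_dev, bar, "virtio-pci");
		if (rc)
			goto err_request_regions;
	}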

cheers,
  Gerd
Gerd Hoffmann March 25, 2015, 2:53 p.m. UTC | #9
> > Signed-off-by: Dave Airlie <airlied@redhat.com>
> > Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
> 
> Standard request from my side for new drm drivers (especially if they're
> this simple): Can you please update the drivers to latest drm internal
> interfaces, i.e. using universal planes and atomic?

Do you have a docs / sample code pointer for me?

thanks,
  Gerd
Gerd Hoffmann March 25, 2015, 3:19 p.m. UTC | #10
On Di, 2015-03-24 at 22:50 +0000, Daniel Stone wrote:
> Hi,
> 
> On 24 March 2015 at 16:07, Gerd Hoffmann <kraxel@redhat.com> wrote:
> > +static int virtio_gpu_crtc_page_flip(struct drm_crtc *crtc,
> > +                                    struct drm_framebuffer *fb,
> > +                                    struct drm_pending_vblank_event *event,
> > +                                    uint32_t flags)
> > +{
> > +       return -EINVAL;
> > +}
> 
> I'm not going to lie, I was really hoping the 5th (?) GPU option for
> Qemu would support pageflipping.

As Dave already pointed out, there is a WIP patch for that; it'll be
there.

While we're at it:
 - bochsdrm (qemu -vga std driver) has supported pageflip since 3.19.
 - cirrus is more or less a lost cause; we mimic real hardware from
   the '90s here, and it simply isn't up to today's needs for many
   reasons.  Just stop using it.
 - qxl -- hmm, not sure; there is this "primary surface" concept in
   the virtual hardware design, which I suspect doesn't mix very well
   with pageflip.

> Daniel's comment about conversion to
> atomic is relevant, but: do you have a mechanism which allows you to
> post updates (e.g. 'start displaying this buffer now please') that
> allows you to get events back when they have actually been displayed?

It's possible to fence the framebuffer update requests, so you'll be
notified when the update has reached the qemu ui code.  Typically the ui
code has queued the update at that point.  So with a local display (sdl,
gtk), showing up on the screen should be just a pageflip (on the host)
away.  With a remote display (vnc) it will take a little longer until
the user actually sees the update.
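
In terms of this patch that's just the existing transfer path with a
fence attached -- a sketch, reusing the helpers already in
virtgpu_vq.c:

	struct virtio_gpu_fence *fence = NULL;

	virtio_gpu_cmd_transfer_to_host_2d(vgdev, bo->hw_res_handle, 0,
					   cpu_to_le32(w), cpu_to_le32(h),
					   cpu_to_le32(x), cpu_to_le32(y),
					   &fence);
	/* fence signals once the qemu ui code has picked up the update */
	reservation_object_add_excl_fence(bo->tbo.resv, &fence->f);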

cheers,
  Gerd
Michael S. Tsirkin March 25, 2015, 3:24 p.m. UTC | #11
On Wed, Mar 25, 2015 at 03:52:01PM +0100, Gerd Hoffmann wrote:
>   Hi,
> 
> > > diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
> > > index e894eb2..a3167fa 100644
> > > --- a/drivers/virtio/virtio_pci_common.c
> > > +++ b/drivers/virtio/virtio_pci_common.c
> > > @@ -510,7 +510,7 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
> > >  		goto err_enable_device;
> > >  
> > >  	rc = pci_request_regions(pci_dev, "virtio-pci");
> > > -	if (rc)
> > > +	if (rc && ((pci_dev->class >> 8) != PCI_CLASS_DISPLAY_VGA))
> > >  		goto err_request_regions;
> > >  
> > >  	if (force_legacy) {
> > 
> > This is probably what you described as "the only concern?
> 
> Ahem, no, forgot that one,

What does the concern refer to then?

> but it is related.  With vesafb using and
> registering the vga compat framebuffer bar pci_request_regions will not
> succeed.
> 
> vesafb will be unregistered later on (this is what I was refering to) by
> the virtio-gpu driver.
> 
> > If we only need to request specific
> > regions, I think we should do exactly that, requesting only parts of
> > regions that are covered by the virtio capabilities.
> 
> That should work too.
> 
> cheers,
>   Gerd

BTW can we teach virtio-gpu to look for the framebuffer using
virtio pci caps?  Or are there limitations, such as only using
IO port BARs or compatibility with BIOS code, that limit us to
specific BARs anyway?
Gerd Hoffmann March 25, 2015, 3:37 p.m. UTC | #12
Hi,

> BTW can we teach virtio-gpu to look for framebuffer using
> virtio pci caps?

The virtio-gpu driver doesn't matter much here; it doesn't use it
anyway.

>  Or are there limitations such as only
> using IO port BARs, or compatibility with
> BIOS code etc that limit us to specific BARs anyway?

Yes, the vgabios code needs to know.  Currently it has bar #2
hardcoded as the vga framebuffer bar.  It's 16-bit code.  I don't feel
like making the probing more complicated ...

cheers,
  Gerd
Michael S. Tsirkin March 25, 2015, 5:09 p.m. UTC | #13
On Wed, Mar 25, 2015 at 04:37:16PM +0100, Gerd Hoffmann wrote:
>   Hi,
> 
> > BTW can we teach virtio-gpu to look for framebuffer using
> > virtio pci caps?
> 
> The virtio-gpu driver doesn't matter much here, it doesn't use it
> anyway.
> 
> >  Or are there limitations such as only
> > using IO port BARs, or compatibility with
> > BIOS code etc that limit us to specific BARs anyway?
> 
> Yes, vgabios code needs to know.  Currently it has bar #2 for the vga
> framebuffer bar hardcoded.  It's 16bit code.  I don't feel like making
> the probing more complicated ...
> 
> cheers,
>   Gerd

OK - you are saying all VGA cards use bar #2 for this
functionality, so we are just following
established practice here?
Gerd Hoffmann March 26, 2015, 7:12 a.m. UTC | #14
On Mi, 2015-03-25 at 18:09 +0100, Michael S. Tsirkin wrote:
> On Wed, Mar 25, 2015 at 04:37:16PM +0100, Gerd Hoffmann wrote:
> >   Hi,
> > 
> > > BTW can we teach virtio-gpu to look for framebuffer using
> > > virtio pci caps?
> > 
> > The virtio-gpu driver doesn't matter much here, it doesn't use it
> > anyway.
> > 
> > >  Or are there limitations such as only
> > > using IO port BARs, or compatibility with
> > > BIOS code etc that limit us to specific BARs anyway?
> > 
> > Yes, vgabios code needs to know.  Currently it has bar #2 for the vga
> > framebuffer bar hardcoded.  It's 16bit code.  I don't feel like making
> > the probing more complicated ...
> > 
> > cheers,
> >   Gerd
> 
> OK - you are saying all VGA cards use bar #2 for this
> functionality, so we are just following
> established practice here?

vgabios checks pci ids to figure out which one to use: qxl+stdvga use
bar #0, vmware-vga bar #1, virtio-vga bar #2.

cheers,
  Gerd
Michael S. Tsirkin March 26, 2015, 8:18 a.m. UTC | #15
On Thu, Mar 26, 2015 at 08:12:39AM +0100, Gerd Hoffmann wrote:
> On Mi, 2015-03-25 at 18:09 +0100, Michael S. Tsirkin wrote:
> > On Wed, Mar 25, 2015 at 04:37:16PM +0100, Gerd Hoffmann wrote:
> > >   Hi,
> > > 
> > > > BTW can we teach virtio-gpu to look for framebuffer using
> > > > virtio pci caps?
> > > 
> > > The virtio-gpu driver doesn't matter much here, it doesn't use it
> > > anyway.
> > > 
> > > >  Or are there limitations such as only
> > > > using IO port BARs, or compatibility with
> > > > BIOS code etc that limit us to specific BARs anyway?
> > > 
> > > Yes, vgabios code needs to know.  Currently it has bar #2 for the vga
> > > framebuffer bar hardcoded.  It's 16bit code.  I don't feel like making
> > > the probing more complicated ...
> > > 
> > > cheers,
> > >   Gerd
> > 
> > OK - you are saying all VGA cards use bar #2 for this
> > functionality, so we are just following
> > established practice here?
> 
> vgabios checks pci ids to figure.  qxl+stdvga use bar #0, vmware-vga bar
> #1, virtio-vga bar #2.
> 
> cheers,
>   Gerd
> 

And is it possible to use an offset within a BAR and/or memory BARs?
If yes, I'd strongly prefer this.
As for writing 16-bit code, I need to do this for virtio scsi/blk
anyway, so we'll be able to share code.
Gerd Hoffmann March 26, 2015, 8:42 a.m. UTC | #16
Hi,

> And is it possible to use offset within BAR and/or memory BARs?
> If yes I'd strongly prefer this.

What is the point?  Do you want to place the virtio regions and the
vga framebuffer in the same pci bar?  Why?  virtio is mmio and traps
into qemu on access, whereas the vga framebuffer is memory-backed (with
dirty tracking turned on).  I don't think this is a good idea, even
though the memory api would probably allow it.

cheers,
  Gerd
Daniel Vetter March 26, 2015, 8:53 a.m. UTC | #17
On Wed, Mar 25, 2015 at 03:53:09PM +0100, Gerd Hoffmann wrote:
> > > Signed-off-by: Dave Airlie <airlied@redhat.com>
> > > Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
> > 
> > Standard request from my side for new drm drivers (especially if they're
> > this simple): Can you please update the drivers to latest drm internal
> > interfaces, i.e. using universal planes and atomic?
> 
> Have a docs / sample code pointer for me?

Picking any of the recently converted drivers or recently merged atomic
drivers should be fairly informative. Overall conversion procedure is
detailed in

http://blog.ffwll.ch/2014/11/atomic-modeset-support-for-kms-drivers.html
http://blog.ffwll.ch/2015/01/update-for-atomic-display-updates.html

And ofc there's the kerneldoc stuff in the drm docbook. If you have
questions probably best to ask them in #dri-devel irc, most of the people
who've done atomic conversions hang out there and can help out.
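
The plane-side boilerplate is fairly small these days; roughly this
(sketch, not the full conversion):

	static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
		.update_plane		= drm_atomic_helper_update_plane,
		.disable_plane		= drm_atomic_helper_disable_plane,
		.destroy		= drm_plane_cleanup,
		.reset			= drm_atomic_helper_plane_reset,
		.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
		.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
	};

	static void virtio_gpu_plane_atomic_update(struct drm_plane *plane,
						   struct drm_plane_state *old_state)
	{
		/* issue set_scanout + resource_flush from plane->state here */
	}

plus drm_universal_plane_init() with DRM_PLANE_TYPE_PRIMARY and the
crtc created via drm_crtc_init_with_planes().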

Cheers, Daniel
Michael S. Tsirkin March 26, 2015, 9:04 a.m. UTC | #18
On Thu, Mar 26, 2015 at 09:42:47AM +0100, Gerd Hoffmann wrote:
>   Hi,
> 
> > And is it possible to use offset within BAR and/or memory BARs?
> > If yes I'd strongly prefer this.
> 
> What is the point?  Do you want place virtio regions and vga framebuffer
> in the same pci bar?  Why?  virtio is mmio and traps into qemu on
> access, whereas the vga framebuffer is memory-backed (with dirty
> tracking turned on).  Don't think this is a good idea, even though the
> memory api would probably allow to do this.
> 
> cheers,
>   Gerd

Absolutely, it's pretty common to mix regions in a BAR.
For example, we have virtio kick (ioeventfd backed,
handled in kernel) in same BAR as common and device
specific configuration.

We did the same thing you are now doing with the
virtio BAR, and now we have to maintain two code
bases.  The virtio pci config was designed to be
future-proof, so why not use it?

This is mostly just making sure we don't paint ourselves into a corner.
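
For reference, this is the structure we'd key off -- each capability
names a BAR plus an offset/length within it (the virtio 1.0 layout):

	struct virtio_pci_cap {
		__u8 cap_vndr;		/* PCI_CAP_ID_VNDR */
		__u8 cap_next;
		__u8 cap_len;
		__u8 cfg_type;		/* which structure this describes */
		__u8 bar;		/* BAR holding it */
		__u8 padding[3];
		__le32 offset;		/* offset within the BAR */
		__le32 length;		/* length of the region */
	};

so a framebuffer region could live at any BAR/offset without guests
having to hardcode anything.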
Gerd Hoffmann March 26, 2015, 11:38 a.m. UTC | #19
Hi,

> Absolutely, it's pretty common to mix regions in a BAR.
> For example, we have virtio kick (ioeventfd backed,
> handled in kernel) in same BAR as common and device
> specific configuration.

> We did the same thing you are now doing with the
> virtio BAR, and now we have to maintain two code
> bases, virtio pci config was designed to be future proof
> so why not use it?

It's not about virtio at all.  It's about vga compatibility, so we have
a simple framebuffer as boot display.  Only used when virtio is *not*
enabled.

> This is mostly just making sure we don't paint ourselves into a corner.

It's a simple memory bar.  vga cards have had that since pci was
invented (standalone ones, chipset graphics aside), and there haven't
been fundamental changes ...

cheers,
  Gerd
Michael S. Tsirkin March 26, 2015, 11:53 a.m. UTC | #20
On Thu, Mar 26, 2015 at 12:38:43PM +0100, Gerd Hoffmann wrote:
>   Hi,
> 
> > Absolutely, it's pretty common to mix regions in a BAR.
> > For example, we have virtio kick (ioeventfd backed,
> > handled in kernel) in same BAR as common and device
> > specific configuration.
> 
> > We did the same thing you are now doing with the
> > virtio BAR, and now we have to maintain two code
> > bases, virtio pci config was designed to be future proof
> > so why not use it?
> 
> It's not about virtio at all.  It's about vga compatibility, so we have
> a simple framebuffer as boot display.  Only used when virtio is *not*
> enabled.
> 

I don't know. This seems exactly like the kind of thing
we had in mind when we added the virtio pci capability.
For example, we have text in the spec that requires drivers
to skip unknown capabilities.

And yes, if the bios pokes at a specific bar, then we do
need to list this info in the virtio spec, which makes
it a virtio-related issue.


> > This is mostly just making sure we don't paint ourselves into a corner.
> 
> It's a simple memory bar.  vga cards have that since pci was invented
> (standalone ones, chipset graphics aside), and there havn't been
> fundamental changes ...
> 
> cheers,
>   Gerd
> 

Yes, it's not about what we put there now. It's about being able
to move things about in the future without breaking guests.
Gerd Hoffmann March 26, 2015, 3:07 p.m. UTC | #21
Hi,

> I don't know. This seems exactly like the kind of thing
> we had in mind when we added the virtio pci capability.
> For example, we have text in spec that requires drivers
> to skip unknown capabilities.
> 
> And yes, if bios pokes at a specific bar then we do
> need to list this info in the virtio spec so this makes
> it an issue that is virtio related.

Hmm, virtio-vga is basically a two-in-one device.  When virtio is
enabled it behaves like virtio-gpu-pci, otherwise it behaves very
similarly to stdvga.  So you need to know nothing about virtio to
handle the vga side, and I want to keep it that way.

When no vga compatibility is needed, there is always the option to
just use virtio-gpu-pci instead.

> Yes, it's not about what we put there now. It's about being able
> to move things about in the future without breaking guests.

We don't have that today for stdvga, and I still fail to see what this
buys us.


Completely different thing crossing my mind:  I think we can make
virtio-vga fully compatible with stdvga.  stdvga has two bars, memory
(#0) and mmio (#2).  We can make the mmio bar larger and place all the
virtio regions there.
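
Something like this (offsets made up, just to illustrate the idea):

	BAR #0 (memory):  vga framebuffer, exactly as stdvga has today
	BAR #2 (mmio):    0x0000+  stdvga registers
	                  0x1000+  virtio common/isr/device/notify regions

A stdvga-only guest driver would keep working unchanged and never look
past the register page.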


I think in any case I'll go split off the vga compatibility bits to a
different patch (and possibly a separate patch series).

cheers,
  Gerd
Michael S. Tsirkin March 26, 2015, 4:47 p.m. UTC | #22
On Thu, Mar 26, 2015 at 04:07:16PM +0100, Gerd Hoffmann wrote:
>   Hi,
> 
> > I don't know. This seems exactly like the kind of thing
> > we had in mind when we added the virtio pci capability.
> > For example, we have text in spec that requires drivers
> > to skip unknown capabilities.
> > 
> > And yes, if bios pokes at a specific bar then we do
> > need to list this info in the virtio spec so this makes
> > it an issue that is virtio related.
> 
> Hmm, virtio-vga is a two-in-one device basically.  When virtio is
> enabled it behaves like virtio-gpu-pci, otherwise it behaves very
> simliar to stdvga.  So you need to know nothing about virtio to handle
> the vga side, and I want keep it that way.
> 
> When no vga compatibility is needed there always is the option to just
> use virtio-gpu-pci instead.
> 
> > Yes, it's not about what we put there now. It's about being able
> > to move things about in the future without breaking guests.
> 
> We don't have that today for stdvga, and I still fail to see what this
> buys us.
> 
> 
> Completely different thing crossing my mind:  I think we can make
> virtio-vga fully compatible with stdvga.  stdvga has two bars, memory
> (#0) and mmio (#2).  We can make the mmio bar larger and place all the
> virtio regions there.
> 

Full compatibility with some standard sounds like a better motivation,
yes.

> 
> I think in any case I'll go split off the vga compatibility bits to a
> different patch (and possible a separate patch series).
> 
> cheers,
>   Gerd

Will you still need me to change the core to claim specific memory only?
Alan Cox March 26, 2015, 4:52 p.m. UTC | #23
> It's not about virtio at all.  It's about vga compatibility, so we have
> a simple framebuffer as boot display.  Only used when virtio is *not*
> enabled.

VGA can be a separate device altogether.

In fact there were *real* PCI graphics cards that did this and had a
register that flipped the output source over.

Alan
Gerd Hoffmann March 27, 2015, 8:08 a.m. UTC | #24
Hi,

> > 
> > Completely different thing crossing my mind:  I think we can make
> > virtio-vga fully compatible with stdvga.  stdvga has two bars, memory
> > (#0) and mmio (#2).  We can make the mmio bar larger and place all the
> > virtio regions there.
> > 
> 
> Full compatibility with some standard sounds like a better motivation,
> yes.

Ok, I'll look into it.

> > I think in any case I'll go split off the vga compatibility bits to a
> > different patch (and possible a separate patch series).
> > 
> > cheers,
> >   Gerd
> 
> Will you still need me to change core to claim specific memory only?

That would be great, yes.  The resource conflict with vesafb/efifb will
stay no matter how we design the pci resource layout of virtio-vga.

cheers,
  Gerd
Gerd Hoffmann March 30, 2015, 12:23 p.m. UTC | #25
Hi,

> > Signed-off-by: Dave Airlie <airlied@redhat.com>
> > Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
> 
> Standard request from my side for new drm drivers (especially if they're
> this simple): Can you please update the drivers to latest drm internal
> interfaces, i.e. using universal planes and atomic?

Up'n'running.  Incremental patch:

https://www.kraxel.org/cgit/linux/commit/?h=virtio-gpu-2d&id=b8edf4f38a1ec5a50f6ac8948521a12f862d3d5a

v2 coming, but I'll go over the other reviews first.

cheers,
  Gerd
Daniel Vetter March 30, 2015, 2:49 p.m. UTC | #26
On Mon, Mar 30, 2015 at 02:23:47PM +0200, Gerd Hoffmann wrote:
>   Hi,
> 
> > > Signed-off-by: Dave Airlie <airlied@redhat.com>
> > > Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
> > 
> > Standard request from my side for new drm drivers (especially if they're
> > this simple): Can you please update the drivers to latest drm internal
> > interfaces, i.e. using universal planes and atomic?
> 
> Up'n'running.  Incremental patch:
> 
> https://www.kraxel.org/cgit/linux/commit/?h=virtio-gpu-2d&id=b8edf4f38a1ec5a50f6ac8948521a12f862d3d5a
> 
> v2 coming, but I'll go over the other reviews first.

Looking good. Wrt pageflip the current MO is to hand-roll it in your
driver; a common approach is to follow the msm async commit
implementation, msm_atomic_commit. The issue is simply that right now
there's still no usable generic vblank callback support (drm_irq.c is a
mess), which is why the core helpers don't support async flips yet.
-Daniel
Daniel Vetter May 25, 2016, 4:40 p.m. UTC | #27
On Mon, Mar 30, 2015 at 4:49 PM, Daniel Vetter <daniel@ffwll.ch> wrote:
> On Mon, Mar 30, 2015 at 02:23:47PM +0200, Gerd Hoffmann wrote:
>> > > Signed-off-by: Dave Airlie <airlied@redhat.com>
>> > > Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
>> >
>> > Standard request from my side for new drm drivers (especially if they're
>> > this simple): Can you please update the drivers to latest drm internal
>> > interfaces, i.e. using universal planes and atomic?
>>
>> Up'n'running.  Incremental patch:
>>
>> https://www.kraxel.org/cgit/linux/commit/?h=virtio-gpu-2d&id=b8edf4f38a1ec5a50f6ac8948521a12f862d3d5a
>>
>> v2 coming, but I'll go over the other reviews first.
>
> Looking good. Wrt pageflip the current MO is to handroll it in your
> driver, common approach is to use the msm async commit implementation
> msm_atomic_commit. The issue is simply that right now there's still no
> useable generic vblank callback support (drm_irq.c is a mess) hence why
> the core helpers don't support async flips yet.

I guess I didn't do a good job of looking at your v2: the cursor is
still using legacy interfaces and not a proper plane. Would be awesome
if you could fix that up. Atomic drivers really shouldn't use the
legacy cursor interfaces any more at all.
-Daniel
Emil Velikov May 25, 2016, 4:44 p.m. UTC | #28
On 25 May 2016 at 17:40, Daniel Vetter <daniel@ffwll.ch> wrote:
> On Mon, Mar 30, 2015 at 4:49 PM, Daniel Vetter <daniel@ffwll.ch> wrote:
>> On Mon, Mar 30, 2015 at 02:23:47PM +0200, Gerd Hoffmann wrote:
>>> > > Signed-off-by: Dave Airlie <airlied@redhat.com>
>>> > > Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
>>> >
>>> > Standard request from my side for new drm drivers (especially if they're
>>> > this simple): Can you please update the drivers to latest drm internal
>>> > interfaces, i.e. using universal planes and atomic?
>>>
>>> Up'n'running.  Incremental patch:
>>>
>>> https://www.kraxel.org/cgit/linux/commit/?h=virtio-gpu-2d&id=b8edf4f38a1ec5a50f6ac8948521a12f862d3d5a
>>>
>>> v2 coming, but I'll go over the other reviews first.
>>
>> Looking good. Wrt pageflip the current MO is to handroll it in your
>> driver, common approach is to use the msm async commit implementation
>> msm_atomic_commit. The issue is simply that right now there's still no
>> useable generic vblank callback support (drm_irq.c is a mess) hence why
>> the core helpers don't support async flips yet.
>
> I guess I didn't do a good job at looking at your v2: Cursor is still
> using legacy interfaces and not a proper plane. Would be awesome if
> you could fix that up. Atomic drivers really shouldn't use the legacy
> cursor interfaces any more at all.

Wild idea:
Worth adding if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
printf("abort abort"); return; }
style of checks for the legacy (pre-atomic) kms helpers ?

Or does it feel like an overkill ?
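
Concretely it would be a one-liner at the top of each legacy entry
point, something like:

	if (WARN_ON(drm_core_check_feature(dev, DRIVER_ATOMIC)))
		return -EINVAL;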

-Emil

Patch

diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 151a050..f2388ea 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -197,6 +197,8 @@  source "drivers/gpu/drm/qxl/Kconfig"
 
 source "drivers/gpu/drm/bochs/Kconfig"
 
+source "drivers/gpu/drm/virtio/Kconfig"
+
 source "drivers/gpu/drm/msm/Kconfig"
 
 source "drivers/gpu/drm/tegra/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 2c239b9..083d443 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -62,6 +62,7 @@  obj-$(CONFIG_DRM_OMAP)	+= omapdrm/
 obj-$(CONFIG_DRM_TILCDC)	+= tilcdc/
 obj-$(CONFIG_DRM_QXL) += qxl/
 obj-$(CONFIG_DRM_BOCHS) += bochs/
+obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio/
 obj-$(CONFIG_DRM_MSM) += msm/
 obj-$(CONFIG_DRM_TEGRA) += tegra/
 obj-$(CONFIG_DRM_STI) += sti/
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
new file mode 100644
index 0000000..55868e2
--- /dev/null
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -0,0 +1,11 @@ 
+config DRM_VIRTIO_GPU
+	tristate "QEMU Virtio GPU"
+	depends on DRM && VIRTIO
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
+	select DRM_TTM
+	help
+	   QEMU based virtio GPU.
diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile
new file mode 100644
index 0000000..57d59ee
--- /dev/null
+++ b/drivers/gpu/drm/virtio/Makefile
@@ -0,0 +1,9 @@ 
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+
+virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_drm_bus.o virtgpu_gem.o virtgpu_fb.o virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o
+
+obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio-gpu.o
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
new file mode 100644
index 0000000..dbc497d
--- /dev/null
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -0,0 +1,64 @@ 
+/*
+ * Copyright (C) 2009 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/debugfs.h>
+
+#include "drmP.h"
+#include "virtgpu_drv.h"
+
+static int
+virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
+
+	seq_printf(m, "fence %ld %lld\n",
+		   atomic64_read(&vgdev->fence_drv.last_seq),
+		   vgdev->fence_drv.sync_seq);
+	return 0;
+}
+
+static struct drm_info_list virtio_gpu_debugfs_list[] = {
+	{ "irq_fence", virtio_gpu_debugfs_irq_info, 0, NULL },
+};
+
+#define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list)
+
+int
+virtio_gpu_debugfs_init(struct drm_minor *minor)
+{
+	drm_debugfs_create_files(virtio_gpu_debugfs_list,
+				 VIRTIO_GPU_DEBUGFS_ENTRIES,
+				 minor->debugfs_root, minor);
+	return 0;
+}
+
+void
+virtio_gpu_debugfs_takedown(struct drm_minor *minor)
+{
+	drm_debugfs_remove_files(virtio_gpu_debugfs_list,
+				 VIRTIO_GPU_DEBUGFS_ENTRIES,
+				 minor);
+}
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
new file mode 100644
index 0000000..578a02c
--- /dev/null
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -0,0 +1,527 @@ 
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "virtgpu_drv.h"
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#define XRES_MIN   320
+#define YRES_MIN   200
+
+#define XRES_DEF  1024
+#define YRES_DEF   768
+
+#define XRES_MAX  8192
+#define YRES_MAX  8192
+
+static void virtio_gpu_crtc_gamma_set(struct drm_crtc *crtc,
+				      u16 *red, u16 *green, u16 *blue,
+				      uint32_t start, uint32_t size)
+{
+	/* TODO */
+}
+
+static void
+virtio_gpu_hide_cursor(struct virtio_gpu_device *vgdev,
+		       struct virtio_gpu_output *output)
+{
+	output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
+	output->cursor.resource_id = 0;
+	virtio_gpu_cursor_ping(vgdev, output);
+}
+
+static int virtio_gpu_crtc_cursor_set(struct drm_crtc *crtc,
+				      struct drm_file *file_priv,
+				      uint32_t handle,
+				      uint32_t width,
+				      uint32_t height,
+				      int32_t hot_x, int32_t hot_y)
+{
+	struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
+	struct virtio_gpu_output *output =
+		container_of(crtc, struct virtio_gpu_output, crtc);
+	struct drm_gem_object *gobj = NULL;
+	struct virtio_gpu_object *qobj = NULL;
+	struct virtio_gpu_fence *fence = NULL;
+	int ret = 0;
+
+	if (handle == 0) {
+		virtio_gpu_hide_cursor(vgdev, output);
+		return 0;
+	}
+
+	/* lookup the cursor */
+	gobj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+	if (gobj == NULL)
+		return -ENOENT;
+
+	qobj = gem_to_virtio_gpu_obj(gobj);
+
+	if (!qobj->hw_res_handle) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = virtio_gpu_cmd_transfer_to_host_2d(vgdev, qobj->hw_res_handle, 0,
+						 cpu_to_le32(64),
+						 cpu_to_le32(64),
+						 0, 0, &fence);
+	if (!ret) {
+		reservation_object_add_excl_fence(qobj->tbo.resv,
+						  &fence->f);
+		virtio_gpu_object_wait(qobj, false);
+	}
+
+	output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
+	output->cursor.resource_id = cpu_to_le32(qobj->hw_res_handle);
+	output->cursor.hot_x = cpu_to_le32(hot_x);
+	output->cursor.hot_y = cpu_to_le32(hot_y);
+	virtio_gpu_cursor_ping(vgdev, output);
+out:
+	drm_gem_object_unreference_unlocked(gobj);
+	return ret;
+}
+
+static int virtio_gpu_crtc_cursor_move(struct drm_crtc *crtc,
+				    int x, int y)
+{
+	struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
+	struct virtio_gpu_output *output =
+		container_of(crtc, struct virtio_gpu_output, crtc);
+
+	output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
+	output->cursor.pos.x = cpu_to_le32(x);
+	output->cursor.pos.y = cpu_to_le32(y);
+	virtio_gpu_cursor_ping(vgdev, output);
+	return 0;
+}
+
+static int virtio_gpu_crtc_page_flip(struct drm_crtc *crtc,
+				     struct drm_framebuffer *fb,
+				     struct drm_pending_vblank_event *event,
+				     uint32_t flags)
+{
+	return -EINVAL;
+}
+
+
+static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
+	.cursor_set2 = virtio_gpu_crtc_cursor_set,
+	.cursor_move = virtio_gpu_crtc_cursor_move,
+	.gamma_set = virtio_gpu_crtc_gamma_set,
+	.set_config = drm_crtc_helper_set_config,
+	.page_flip = virtio_gpu_crtc_page_flip,
+	.destroy = drm_crtc_cleanup,
+};
+
+static void virtio_gpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct virtio_gpu_framebuffer *virtio_gpu_fb
+		= to_virtio_gpu_framebuffer(fb);
+
+	if (virtio_gpu_fb->obj)
+		drm_gem_object_unreference_unlocked(virtio_gpu_fb->obj);
+	drm_framebuffer_cleanup(fb);
+	kfree(virtio_gpu_fb);
+}
+
+static int
+virtio_gpu_framebuffer_surface_dirty(struct drm_framebuffer *fb,
+				     struct drm_file *file_priv,
+				     unsigned flags, unsigned color,
+				     struct drm_clip_rect *clips,
+				     unsigned num_clips)
+{
+	struct virtio_gpu_framebuffer *virtio_gpu_fb
+		= to_virtio_gpu_framebuffer(fb);
+
+	return virtio_gpu_surface_dirty(virtio_gpu_fb, clips, num_clips);
+}
+
+static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
+	.destroy = virtio_gpu_user_framebuffer_destroy,
+	.dirty = virtio_gpu_framebuffer_surface_dirty,
+};
+
+int
+virtio_gpu_framebuffer_init(struct drm_device *dev,
+			    struct virtio_gpu_framebuffer *vgfb,
+			    struct drm_mode_fb_cmd2 *mode_cmd,
+			    struct drm_gem_object *obj)
+{
+	int ret;
+	struct virtio_gpu_object *bo;
+	vgfb->obj = obj;
+
+	bo = gem_to_virtio_gpu_obj(obj);
+
+	ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs);
+	if (ret) {
+		vgfb->obj = NULL;
+		return ret;
+	}
+	drm_helper_mode_fill_fb_struct(&vgfb->base, mode_cmd);
+
+	spin_lock_init(&vgfb->dirty_lock);
+	vgfb->x1 = vgfb->y1 = INT_MAX;
+	vgfb->x2 = vgfb->y2 = 0;
+	return 0;
+}
+
+static void virtio_gpu_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static bool virtio_gpu_crtc_mode_fixup(struct drm_crtc *crtc,
+				       const struct drm_display_mode *mode,
+				       struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static int virtio_gpu_crtc_mode_set(struct drm_crtc *crtc,
+				    struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted_mode,
+				    int x, int y,
+				    struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_framebuffer *vgfb;
+	struct virtio_gpu_object *bo, *old_bo = NULL;
+	struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
+
+	if (!crtc->primary->fb) {
+		DRM_DEBUG_KMS("No FB bound\n");
+		return 0;
+	}
+
+	if (old_fb) {
+		vgfb = to_virtio_gpu_framebuffer(old_fb);
+		old_bo = gem_to_virtio_gpu_obj(vgfb->obj);
+	}
+	vgfb = to_virtio_gpu_framebuffer(crtc->primary->fb);
+	bo = gem_to_virtio_gpu_obj(vgfb->obj);
+	DRM_DEBUG("+%d+%d (%d,%d) => (%d,%d)\n",
+		  x, y,
+		  mode->hdisplay, mode->vdisplay,
+		  adjusted_mode->hdisplay,
+		  adjusted_mode->vdisplay);
+
+	virtio_gpu_cmd_set_scanout(vgdev, output->index, bo->hw_res_handle,
+				mode->hdisplay, mode->vdisplay, x, y);
+
+	return 0;
+}
+
+static void virtio_gpu_crtc_prepare(struct drm_crtc *crtc)
+{
+	DRM_DEBUG("current: %dx%d+%d+%d (%d).\n",
+		  crtc->mode.hdisplay, crtc->mode.vdisplay,
+		  crtc->x, crtc->y, crtc->enabled);
+}
+
+static void virtio_gpu_crtc_commit(struct drm_crtc *crtc)
+{
+	DRM_DEBUG("\n");
+}
+
+static void virtio_gpu_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static void virtio_gpu_crtc_disable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
+
+	virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
+}
+
+static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
+	.disable = virtio_gpu_crtc_disable,
+	.dpms = virtio_gpu_crtc_dpms,
+	.mode_fixup = virtio_gpu_crtc_mode_fixup,
+	.mode_set = virtio_gpu_crtc_mode_set,
+	.prepare = virtio_gpu_crtc_prepare,
+	.commit = virtio_gpu_crtc_commit,
+	.load_lut = virtio_gpu_crtc_load_lut,
+};
+
+static void virtio_gpu_enc_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static bool virtio_gpu_enc_mode_fixup(struct drm_encoder *encoder,
+				      const struct drm_display_mode *mode,
+				      struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void virtio_gpu_enc_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void virtio_gpu_enc_commit(struct drm_encoder *encoder)
+{
+}
+
+static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder,
+				    struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted_mode)
+{
+}
+
+static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
+{
+	struct virtio_gpu_output *output =
+		drm_connector_to_virtio_gpu_output(connector);
+	struct drm_display_mode *mode = NULL;
+	int count, width, height;
+
+	width  = le32_to_cpu(output->info.r.width);
+	height = le32_to_cpu(output->info.r.height);
+	count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
+
+	if (width == 0 || height == 0) {
+		width = XRES_DEF;
+		height = YRES_DEF;
+		drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
+	} else {
+		DRM_DEBUG("add mode: %dx%d\n", width, height);
+		mode = drm_cvt_mode(connector->dev, width, height, 60,
+				    false, false, false);
+		mode->type |= DRM_MODE_TYPE_PREFERRED;
+		drm_mode_probed_add(connector, mode);
+		count++;
+	}
+
+	return count;
+}
+
+static int virtio_gpu_conn_mode_valid(struct drm_connector *connector,
+				      struct drm_display_mode *mode)
+{
+	struct virtio_gpu_output *output =
+		drm_connector_to_virtio_gpu_output(connector);
+	int width, height;
+
+	width  = le32_to_cpu(output->info.r.width);
+	height = le32_to_cpu(output->info.r.height);
+
+	if (!(mode->type & DRM_MODE_TYPE_PREFERRED))
+		return MODE_OK;
+	if (mode->hdisplay == XRES_DEF && mode->vdisplay == YRES_DEF)
+		return MODE_OK;
+	if (mode->hdisplay <= width  && mode->hdisplay >= width - 16 &&
+	    mode->vdisplay <= height && mode->vdisplay >= height - 16)
+		return MODE_OK;
+
+	DRM_DEBUG("del mode: %dx%d\n", mode->hdisplay, mode->vdisplay);
+	return MODE_BAD;
+}
+
+static struct drm_encoder*
+virtio_gpu_best_encoder(struct drm_connector *connector)
+{
+	struct virtio_gpu_output *virtio_gpu_output =
+		drm_connector_to_virtio_gpu_output(connector);
+
+	return &virtio_gpu_output->enc;
+}
+
+
+static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = {
+	.dpms = virtio_gpu_enc_dpms,
+	.mode_fixup = virtio_gpu_enc_mode_fixup,
+	.prepare = virtio_gpu_enc_prepare,
+	.mode_set = virtio_gpu_enc_mode_set,
+	.commit = virtio_gpu_enc_commit,
+};
+
+static const struct drm_connector_helper_funcs virtio_gpu_conn_helper_funcs = {
+	.get_modes = virtio_gpu_conn_get_modes,
+	.mode_valid = virtio_gpu_conn_mode_valid,
+	.best_encoder = virtio_gpu_best_encoder,
+};
+
+static void virtio_gpu_conn_save(struct drm_connector *connector)
+{
+	DRM_DEBUG("\n");
+}
+
+static void virtio_gpu_conn_restore(struct drm_connector *connector)
+{
+	DRM_DEBUG("\n");
+}
+
+static enum drm_connector_status virtio_gpu_conn_detect(
+			struct drm_connector *connector,
+			bool force)
+{
+	struct virtio_gpu_output *output =
+		drm_connector_to_virtio_gpu_output(connector);
+
+	if (output->info.enabled)
+		return connector_status_connected;
+	else
+		return connector_status_disconnected;
+}
+
+static int virtio_gpu_conn_set_property(struct drm_connector *connector,
+				   struct drm_property *property,
+				   uint64_t value)
+{
+	DRM_DEBUG("\n");
+	return 0;
+}
+
+static void virtio_gpu_conn_destroy(struct drm_connector *connector)
+{
+	struct virtio_gpu_output *virtio_gpu_output =
+		drm_connector_to_virtio_gpu_output(connector);
+
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+	kfree(virtio_gpu_output);
+}
+
+static const struct drm_connector_funcs virtio_gpu_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.save = virtio_gpu_conn_save,
+	.restore = virtio_gpu_conn_restore,
+	.detect = virtio_gpu_conn_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = virtio_gpu_conn_set_property,
+	.destroy = virtio_gpu_conn_destroy,
+};
+
+static const struct drm_encoder_funcs virtio_gpu_enc_funcs = {
+	.destroy = drm_encoder_cleanup,
+};
+
+static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
+{
+	struct drm_device *dev = vgdev->ddev;
+	struct virtio_gpu_output *output = vgdev->outputs + index;
+	struct drm_connector *connector = &output->conn;
+	struct drm_encoder *encoder = &output->enc;
+	struct drm_crtc *crtc = &output->crtc;
+
+	output->index = index;
+	if (index == 0) {
+		output->info.enabled = cpu_to_le32(true);
+		output->info.r.width = cpu_to_le32(XRES_DEF);
+		output->info.r.height = cpu_to_le32(YRES_DEF);
+	}
+
+	drm_crtc_init(dev, crtc, &virtio_gpu_crtc_funcs);
+	drm_mode_crtc_set_gamma_size(crtc, 256);
+	drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
+
+	drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
+			   DRM_MODE_CONNECTOR_VIRTUAL);
+	connector->polled = DRM_CONNECTOR_POLL_HPD;
+	drm_encoder_init(dev, encoder, &virtio_gpu_enc_funcs,
+			 DRM_MODE_ENCODER_VIRTUAL);
+
+	encoder->possible_crtcs = 1 << index;
+	drm_mode_connector_attach_encoder(connector, encoder);
+	drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs);
+	drm_connector_helper_add(connector, &virtio_gpu_conn_helper_funcs);
+	drm_connector_register(connector);
+	return 0;
+}
+
+static struct drm_framebuffer *
+virtio_gpu_user_framebuffer_create(struct drm_device *dev,
+				   struct drm_file *file_priv,
+				   struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_gem_object *obj = NULL;
+	struct virtio_gpu_framebuffer *virtio_gpu_fb;
+	int ret;
+
+	/* lookup object associated with res handle */
+	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+	if (!obj)
+		return ERR_PTR(-EINVAL);
+
+	virtio_gpu_fb = kzalloc(sizeof(*virtio_gpu_fb), GFP_KERNEL);
+	if (virtio_gpu_fb == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
+	if (ret) {
+		kfree(virtio_gpu_fb);
+		if (obj)
+			drm_gem_object_unreference_unlocked(obj);
+		return NULL;
+	}
+
+	return &virtio_gpu_fb->base;
+}
+
+static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = {
+	.fb_create = virtio_gpu_user_framebuffer_create,
+};
+
+int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
+{
+	int i;
+	int ret;
+
+	drm_mode_config_init(vgdev->ddev);
+	vgdev->ddev->mode_config.funcs = (void *)&virtio_gpu_mode_funcs;
+
+	/* modes will be validated against the framebuffer size */
+	vgdev->ddev->mode_config.min_width = XRES_MIN;
+	vgdev->ddev->mode_config.min_height = YRES_MIN;
+	vgdev->ddev->mode_config.max_width = XRES_MAX;
+	vgdev->ddev->mode_config.max_height = YRES_MAX;
+
+	for (i = 0 ; i < vgdev->num_scanouts; ++i)
+		vgdev_output_init(vgdev, i);
+
+	/* primary surface must be created by this point, to allow
+	 * issuing command queue commands and having them read by
+	 * spice server. */
+	ret = virtio_gpu_fbdev_init(vgdev);
+	if (ret)
+		return ret;
+
+	ret = drm_vblank_init(vgdev->ddev, vgdev->num_scanouts);
+
+	drm_kms_helper_poll_init(vgdev->ddev);
+	return ret;
+}
+
+void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
+{
+	virtio_gpu_fbdev_fini(vgdev);
+	drm_mode_config_cleanup(vgdev->ddev);
+}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
new file mode 100644
index 0000000..e4b50af
--- /dev/null
+++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
@@ -0,0 +1,68 @@ 
+#include <linux/pci.h>
+
+#include "virtgpu_drv.h"
+
+int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master)
+{
+	struct pci_dev *pdev = dev->pdev;
+
+	if (pdev) {
+		return drm_pci_set_busid(dev, master);
+	}
+	return 0;
+}
+
+static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev)
+{
+	struct apertures_struct *ap;
+	bool primary;
+	ap = alloc_apertures(1);
+	if (!ap)
+		return;
+
+	ap->ranges[0].base = pci_resource_start(pci_dev, 2);
+	ap->ranges[0].size = pci_resource_len(pci_dev, 2);
+
+	primary = pci_dev->resource[PCI_ROM_RESOURCE].flags
+		& IORESOURCE_ROM_SHADOW;
+
+	remove_conflicting_framebuffers(ap, "virtiodrmfb", primary);
+
+	kfree(ap);
+}
+
+int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
+{
+	struct drm_device *dev;
+	int ret;
+
+	dev = drm_dev_alloc(driver, &vdev->dev);
+	if (!dev)
+		return -ENOMEM;
+	dev->virtdev = vdev;
+	vdev->priv = dev;
+
+	if (strcmp(vdev->dev.parent->bus->name, "pci") == 0) {
+		struct pci_dev *pdev = to_pci_dev(vdev->dev.parent);
+		bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
+		DRM_INFO("pci: %s detected\n",
+			 vga ? "virtio-vga" : "virtio-gpu-pci");
+		dev->pdev = pdev;
+		if (vga)
+			virtio_pci_kick_out_firmware_fb(pdev);
+	}
+
+	ret = drm_dev_register(dev, 0);
+	if (ret)
+		goto err_free;
+
+	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
+		 driver->major, driver->minor, driver->patchlevel,
+		 driver->date, dev->primary->index);
+
+	return 0;
+
+err_free:
+	drm_dev_unref(dev);
+	return ret;
+}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
new file mode 100644
index 0000000..3662e86
--- /dev/null
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -0,0 +1,132 @@ 
+/*
+ * Copyright 2011 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/pci.h>
+#include "drmP.h"
+#include "drm/drm.h"
+
+#include "virtgpu_drv.h"
+static struct drm_driver driver;
+
+static int virtio_gpu_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, virtio_gpu_modeset, int, 0400);
+
+static int virtio_gpu_probe(struct virtio_device *vdev)
+{
+#ifdef CONFIG_VGA_CONSOLE
+	if (vgacon_text_force() && virtio_gpu_modeset == -1)
+		return -EINVAL;
+#endif
+
+	if (virtio_gpu_modeset == 0)
+		return -EINVAL;
+
+	return drm_virtio_init(&driver, vdev);
+}
+
+static void virtio_gpu_remove(struct virtio_device *vdev)
+{
+	struct drm_device *dev = vdev->priv;
+	drm_put_dev(dev);
+}
+
+static void virtio_gpu_config_changed(struct virtio_device *vdev)
+{
+	struct drm_device *dev = vdev->priv;
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+
+	schedule_work(&vgdev->config_changed_work);
+}
+
+static struct virtio_device_id id_table[] = {
+	{ VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID },
+	{ 0 },
+};
+
+static unsigned int features[] = {
+};
+static struct virtio_driver virtio_gpu_driver = {
+	.feature_table = features,
+	.feature_table_size = ARRAY_SIZE(features),
+	.driver.name = KBUILD_MODNAME,
+	.driver.owner = THIS_MODULE,
+	.id_table = id_table,
+	.probe = virtio_gpu_probe,
+	.remove = virtio_gpu_remove,
+	.config_changed = virtio_gpu_config_changed
+};
+
+module_virtio_driver(virtio_gpu_driver);
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio GPU driver");
+MODULE_LICENSE("GPL");
+
+static const struct file_operations virtio_gpu_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.mmap = virtio_gpu_mmap,
+	.poll = drm_poll,
+	.read = drm_read,
+	.unlocked_ioctl	= drm_ioctl,
+	.release = drm_release,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
+
+static struct drm_driver driver = {
+	.driver_features = DRIVER_MODESET | DRIVER_GEM,
+	.set_busid = drm_virtio_set_busid,
+	.load = virtio_gpu_driver_load,
+	.unload = virtio_gpu_driver_unload,
+
+	.dumb_create = virtio_gpu_mode_dumb_create,
+	.dumb_map_offset = virtio_gpu_mode_dumb_mmap,
+	.dumb_destroy = virtio_gpu_mode_dumb_destroy,
+
+#if defined(CONFIG_DEBUG_FS)
+	.debugfs_init = virtio_gpu_debugfs_init,
+	.debugfs_cleanup = virtio_gpu_debugfs_takedown,
+#endif
+
+	.gem_free_object = virtio_gpu_gem_free_object,
+	.fops = &virtio_gpu_driver_fops,
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
new file mode 100644
index 0000000..6082ec3
--- /dev/null
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -0,0 +1,326 @@ 
+/*
+ * Copyright (C) 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License v2. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#ifndef VIRTIO_DRV_H
+#define VIRTIO_DRV_H
+
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_gpu.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_crtc_helper.h>
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+
+#define DRIVER_NAME "virtio_gpu"
+#define DRIVER_DESC "virtio GPU"
+#define DRIVER_DATE "0"
+
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 1
+
+/* virtgpu_drm_bus.c */
+int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master);
+int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
+
+struct virtio_gpu_object {
+	struct drm_gem_object gem_base;
+	uint32_t hw_res_handle;
+
+	struct sg_table *pages;
+	void *vmap;
+	bool dumb;
+	struct ttm_place                placement_code;
+	struct ttm_placement		placement;
+	struct ttm_buffer_object	tbo;
+	struct ttm_bo_kmap_obj		kmap;
+};
+#define gem_to_virtio_gpu_obj(gobj) \
+	container_of((gobj), struct virtio_gpu_object, gem_base)
+
+struct virtio_gpu_vbuffer;
+struct virtio_gpu_device;
+
+typedef void (*virtio_gpu_resp_cb)(struct virtio_gpu_device *vgdev,
+				   struct virtio_gpu_vbuffer *vbuf);
+
+struct virtio_gpu_fence_driver {
+	atomic64_t       last_seq;
+	uint64_t         sync_seq;
+	struct list_head fences;
+	spinlock_t       lock;
+};
+
+struct virtio_gpu_fence {
+	struct fence f;
+	struct virtio_gpu_fence_driver *drv;
+	struct list_head node;
+	uint64_t seq;
+};
+#define to_virtio_fence(x) \
+	container_of(x, struct virtio_gpu_fence, f)
+
+struct virtio_gpu_vbuffer {
+	char *buf;
+	int size;
+	bool debug_dump_sglists;
+
+	void *data_buf;
+	uint32_t data_size;
+
+	char *resp_buf;
+	int resp_size;
+
+	virtio_gpu_resp_cb resp_cb;
+
+	struct list_head destroy_list;
+};
+
+struct virtio_gpu_output {
+	int index;
+	struct drm_crtc crtc;
+	struct drm_connector conn;
+	struct drm_encoder enc;
+	struct virtio_gpu_display_one info;
+	struct virtio_gpu_update_cursor cursor;
+	int cur_x;
+	int cur_y;
+};
+#define drm_crtc_to_virtio_gpu_output(x) \
+	container_of(x, struct virtio_gpu_output, crtc)
+#define drm_connector_to_virtio_gpu_output(x) \
+	container_of(x, struct virtio_gpu_output, conn)
+#define drm_encoder_to_virtio_gpu_output(x) \
+	container_of(x, struct virtio_gpu_output, enc)
+
+struct virtio_gpu_framebuffer {
+	struct drm_framebuffer base;
+	struct drm_gem_object *obj;
+	int x1, y1, x2, y2; /* dirty rect */
+	spinlock_t dirty_lock;
+	uint32_t hw_res_handle;
+};
+#define to_virtio_gpu_framebuffer(x) \
+	container_of(x, struct virtio_gpu_framebuffer, base)
+
+struct virtio_gpu_mman {
+	struct ttm_bo_global_ref        bo_global_ref;
+	struct drm_global_reference	mem_global_ref;
+	bool				mem_global_referenced;
+	struct ttm_bo_device		bdev;
+};
+
+struct virtio_gpu_fbdev;
+
+struct virtio_gpu_queue {
+	struct virtqueue *vq;
+	spinlock_t qlock;
+	wait_queue_head_t ack_queue;
+	struct work_struct dequeue_work;
+};
+
+struct virtio_gpu_device {
+	struct device *dev;
+	struct drm_device *ddev;
+
+	struct virtio_device *vdev;
+
+	struct virtio_gpu_mman mman;
+
+	/* pointer to fbdev info structure */
+	struct virtio_gpu_fbdev *vgfbdev;
+	struct virtio_gpu_output outputs[VIRTIO_GPU_MAX_SCANOUTS];
+	uint32_t num_scanouts;
+
+	struct virtio_gpu_queue ctrlq;
+	struct virtio_gpu_queue cursorq;
+
+	struct idr	resource_idr;
+	spinlock_t resource_idr_lock;
+
+	wait_queue_head_t resp_wq;
+	/* current display info */
+	spinlock_t display_info_lock;
+
+	struct virtio_gpu_fence_driver fence_drv;
+
+	struct idr	ctx_id_idr;
+	spinlock_t ctx_id_idr_lock;
+
+	struct work_struct config_changed_work;
+};
+
+struct virtio_gpu_fpriv {
+	uint32_t ctx_id;
+};
+
+/* virtio_ioctl.c */
+#define DRM_VIRTIO_NUM_IOCTLS 10
+extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
+
+/* virtio_kms.c */
+int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags);
+int virtio_gpu_driver_unload(struct drm_device *dev);
+
+/* virtio_gem.c */
+void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj);
+int virtio_gpu_gem_init(struct virtio_gpu_device *vgdev);
+void virtio_gpu_gem_fini(struct virtio_gpu_device *vgdev);
+int virtio_gpu_gem_create(struct drm_file *file,
+			  struct drm_device *dev,
+			  uint64_t size,
+			  struct drm_gem_object **obj_p,
+			  uint32_t *handle_p);
+struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
+						  size_t size, bool kernel,
+						  bool pinned);
+int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
+				struct drm_device *dev,
+				struct drm_mode_create_dumb *args);
+int virtio_gpu_mode_dumb_destroy(struct drm_file *file_priv,
+				 struct drm_device *dev,
+				 uint32_t handle);
+int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
+			      struct drm_device *dev,
+			      uint32_t handle, uint64_t *offset_p);
+
+/* virtio_fb */
+#define VIRTIO_GPUFB_CONN_LIMIT 1
+int virtio_gpu_fbdev_init(struct virtio_gpu_device *vgdev);
+void virtio_gpu_fbdev_fini(struct virtio_gpu_device *vgdev);
+int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *qfb,
+			     struct drm_clip_rect *clips,
+			     unsigned num_clips);
+/* virtio vg */
+int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
+			       uint32_t *resid);
+void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id);
+int virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
+				   uint32_t resource_id,
+				   uint32_t format,
+				   uint32_t width,
+				   uint32_t height);
+int virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
+				  uint32_t resource_id);
+int virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+				       uint32_t resource_id, uint64_t offset,
+				       __le32 width, __le32 height,
+				       __le32 x, __le32 y,
+				       struct virtio_gpu_fence **fence);
+int virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
+				  uint32_t resource_id,
+				  uint32_t x, uint32_t y,
+				  uint32_t width, uint32_t height);
+int virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
+			       uint32_t scanout_id, uint32_t resource_id,
+			       uint32_t width, uint32_t height,
+			       uint32_t x, uint32_t y);
+int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
+			     struct virtio_gpu_object *obj,
+			     uint32_t resource_id,
+			     struct virtio_gpu_fence **fence);
+int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
+int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
+void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
+			    struct virtio_gpu_output *output);
+int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev);
+int virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
+					  uint32_t resource_id);
+void virtio_gpu_ctrl_ack(struct virtqueue *vq);
+void virtio_gpu_cursor_ack(struct virtqueue *vq);
+void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
+void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
+
+/* virtio_gpu_display.c */
+int virtio_gpu_framebuffer_init(struct drm_device *dev,
+				struct virtio_gpu_framebuffer *vgfb,
+				struct drm_mode_fb_cmd2 *mode_cmd,
+				struct drm_gem_object *obj);
+int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
+void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
+
+/* virtio_gpu_ttm.c */
+int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev);
+void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
+bool virtio_gpu_ttm_bo_is_virtio_gpu_object(struct ttm_buffer_object *bo);
+int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* virtio_gpu_fence.c */
+int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
+			  struct virtio_gpu_ctrl_hdr *cmd_hdr,
+			  struct virtio_gpu_fence **fence);
+void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
+				    u64 last_seq);
+
+/* virtio_gpu_object */
+int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
+			     unsigned long size, bool kernel, bool pinned,
+			     struct virtio_gpu_object **bo_ptr);
+int virtio_gpu_object_kmap(struct virtio_gpu_object *bo, void **ptr);
+int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
+				   struct virtio_gpu_object *bo);
+void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo);
+int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
+
+static inline struct virtio_gpu_object*
+virtio_gpu_object_ref(struct virtio_gpu_object *bo)
+{
+	ttm_bo_reference(&bo->tbo);
+	return bo;
+}
+
+static inline void virtio_gpu_object_unref(struct virtio_gpu_object **bo)
+{
+	struct ttm_buffer_object *tbo;
+
+	if ((*bo) == NULL)
+		return;
+	tbo = &((*bo)->tbo);
+	ttm_bo_unref(&tbo);
+	if (tbo == NULL)
+		*bo = NULL;
+}
+
+static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo)
+{
+	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+}
+
+static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo,
+					 bool no_wait)
+{
+	int r;
+
+	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
+	if (unlikely(r != 0)) {
+		if (r != -ERESTARTSYS) {
+			struct virtio_gpu_device *qdev =
+				bo->gem_base.dev->dev_private;
+			dev_err(qdev->dev, "%p reserve failed\n", bo);
+		}
+		return r;
+	}
+	return 0;
+}
+
+static inline void virtio_gpu_object_unreserve(struct virtio_gpu_object *bo)
+{
+	ttm_bo_unreserve(&bo->tbo);
+}
+
+/* virgl debugfs */
+int virtio_gpu_debugfs_init(struct drm_minor *minor);
+void virtio_gpu_debugfs_takedown(struct drm_minor *minor);
+
+#endif
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
new file mode 100644
index 0000000..1d79457
--- /dev/null
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -0,0 +1,415 @@ 
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include "virtgpu_drv.h"
+
+#define VIRTIO_GPU_FBCON_POLL_PERIOD (HZ / 60)
+
+struct virtio_gpu_fbdev {
+	struct drm_fb_helper           helper;
+	struct virtio_gpu_framebuffer  vgfb;
+	struct list_head	       fbdev_list;
+	struct virtio_gpu_device       *vgdev;
+	struct delayed_work            work;
+};
+#define DL_ALIGN_UP(x, a) ALIGN(x, a)
+#define DL_ALIGN_DOWN(x, a) ALIGN(x-(a-1), a)
+
+static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,
+				   bool store, int x, int y,
+				   int width, int height)
+{
+	struct drm_device *dev = fb->base.dev;
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	bool store_for_later = false;
+	int aligned_x;
+	int bpp = (fb->base.bits_per_pixel / 8);
+	int x2, y2;
+	unsigned long flags;
+	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->obj);
+
+	aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
+	width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
+	x = aligned_x;
+
+	if ((width <= 0) ||
+	    (x + width > fb->base.width) ||
+	    (y + height > fb->base.height)) {
+		DRM_DEBUG("values out of range %dx%d+%d+%d, fb %dx%d\n",
+			  width, height, x, y,
+			  fb->base.width, fb->base.height);
+		return -EINVAL;
+	}
+
+	/* If we are in an atomic context, just record the dirty region
+	 * for the delayed worker; in_atomic() must be checked before
+	 * taking the spinlock, since holding it makes us atomic too. */
+	if (in_atomic() || store)
+		store_for_later = true;
+
+	x2 = x + width - 1;
+	y2 = y + height - 1;
+
+	spin_lock_irqsave(&fb->dirty_lock, flags);
+
+	if (fb->y1 < y)
+		y = fb->y1;
+	if (fb->y2 > y2)
+		y2 = fb->y2;
+	if (fb->x1 < x)
+		x = fb->x1;
+	if (fb->x2 > x2)
+		x2 = fb->x2;
+
+	if (store_for_later) {
+		fb->x1 = x;
+		fb->x2 = x2;
+		fb->y1 = y;
+		fb->y2 = y2;
+		spin_unlock_irqrestore(&fb->dirty_lock, flags);
+		return 0;
+	}
+
+	fb->x1 = fb->y1 = INT_MAX;
+	fb->x2 = fb->y2 = 0;
+
+	spin_unlock_irqrestore(&fb->dirty_lock, flags);
+
+	{
+		uint32_t offset;
+		uint32_t w = x2 - x + 1;
+		uint32_t h = y2 - y + 1;
+
+		offset = (y * fb->base.pitches[0]) + x * bpp;
+
+		virtio_gpu_cmd_transfer_to_host_2d(vgdev, obj->hw_res_handle,
+						   offset,
+						   cpu_to_le32(w),
+						   cpu_to_le32(h),
+						   cpu_to_le32(x),
+						   cpu_to_le32(y),
+						   NULL);
+
+	}
+	virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle,
+				      x, y, x2 - x + 1, y2 - y + 1);
+	return 0;
+}
+
+int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *vgfb,
+			     struct drm_clip_rect *clips,
+			     unsigned num_clips)
+{
+	struct virtio_gpu_device *vgdev = vgfb->base.dev->dev_private;
+	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->obj);
+	struct drm_clip_rect norect;
+	struct drm_clip_rect *clips_ptr;
+	int left, right, top, bottom;
+	int i;
+	int inc = 1;
+	if (!num_clips) {
+		num_clips = 1;
+		clips = &norect;
+		norect.x1 = norect.y1 = 0;
+		norect.x2 = vgfb->base.width;
+		norect.y2 = vgfb->base.height;
+	}
+	left = clips->x1;
+	right = clips->x2;
+	top = clips->y1;
+	bottom = clips->y2;
+
+	/* skip the first clip rect */
+	for (i = 1, clips_ptr = clips + inc;
+	     i < num_clips; i++, clips_ptr += inc) {
+		left = min_t(int, left, (int)clips_ptr->x1);
+		right = max_t(int, right, (int)clips_ptr->x2);
+		top = min_t(int, top, (int)clips_ptr->y1);
+		bottom = max_t(int, bottom, (int)clips_ptr->y2);
+	}
+
+	if (obj->dumb)
+		return virtio_gpu_dirty_update(vgfb, false, left, top,
+					       right - left, bottom - top);
+
+	virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle,
+				      left, top, right - left, bottom - top);
+	return 0;
+}
+
+static void virtio_gpu_fb_dirty_work(struct work_struct *work)
+{
+	struct delayed_work *delayed_work = to_delayed_work(work);
+	struct virtio_gpu_fbdev *vfbdev =
+		container_of(delayed_work, struct virtio_gpu_fbdev, work);
+	struct virtio_gpu_framebuffer *vgfb = &vfbdev->vgfb;
+
+	virtio_gpu_dirty_update(&vfbdev->vgfb, false, vgfb->x1, vgfb->y1,
+				vgfb->x2 - vgfb->x1, vgfb->y2 - vgfb->y1);
+}
+
+static void virtio_gpu_3d_fillrect(struct fb_info *info,
+				   const struct fb_fillrect *rect)
+{
+	struct virtio_gpu_fbdev *vfbdev = info->par;
+	sys_fillrect(info, rect);
+	virtio_gpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy,
+			     rect->width, rect->height);
+	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
+}
+
+static void virtio_gpu_3d_copyarea(struct fb_info *info,
+				   const struct fb_copyarea *area)
+{
+	struct virtio_gpu_fbdev *vfbdev = info->par;
+	sys_copyarea(info, area);
+	virtio_gpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy,
+			   area->width, area->height);
+	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
+}
+
+static void virtio_gpu_3d_imageblit(struct fb_info *info,
+				    const struct fb_image *image)
+{
+	struct virtio_gpu_fbdev *vfbdev = info->par;
+	sys_imageblit(info, image);
+	virtio_gpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy,
+			     image->width, image->height);
+	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
+}
+
+static struct fb_ops virtio_gpufb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
+	.fb_fillrect = virtio_gpu_3d_fillrect,
+	.fb_copyarea = virtio_gpu_3d_copyarea,
+	.fb_imageblit = virtio_gpu_3d_imageblit,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+	.fb_debug_enter = drm_fb_helper_debug_enter,
+	.fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int virtio_gpu_vmap_fb(struct virtio_gpu_device *vgdev,
+			      struct virtio_gpu_object *obj)
+{
+	return virtio_gpu_object_kmap(obj, NULL);
+}
+
+static int virtio_gpufb_create(struct drm_fb_helper *helper,
+			       struct drm_fb_helper_surface_size *sizes)
+{
+	struct virtio_gpu_fbdev *vfbdev =
+		container_of(helper, struct virtio_gpu_fbdev, helper);
+	struct drm_device *dev = helper->dev;
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct fb_info *info;
+	struct drm_framebuffer *fb;
+	struct drm_mode_fb_cmd2 mode_cmd = {};
+	struct virtio_gpu_object *obj;
+	struct device *device = vgdev->dev;
+	uint32_t resid, format, size;
+	int ret;
+
+	if (sizes->surface_bpp == 24)
+		sizes->surface_bpp = 32;
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+							  sizes->surface_depth);
+
+	switch (mode_cmd.pixel_format) {
+#ifdef __BIG_ENDIAN
+	case DRM_FORMAT_XRGB8888:
+		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
+		break;
+	case DRM_FORMAT_ARGB8888:
+		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
+		break;
+	case DRM_FORMAT_BGRX8888:
+		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
+		break;
+	case DRM_FORMAT_BGRA8888:
+		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
+		break;
+	case DRM_FORMAT_RGBX8888:
+		format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
+		break;
+	case DRM_FORMAT_RGBA8888:
+		format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
+		break;
+	case DRM_FORMAT_XBGR8888:
+		format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
+		break;
+	case DRM_FORMAT_ABGR8888:
+		format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
+		break;
+#else
+	case DRM_FORMAT_XRGB8888:
+		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
+		break;
+	case DRM_FORMAT_ARGB8888:
+		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
+		break;
+	case DRM_FORMAT_BGRX8888:
+		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
+		break;
+	case DRM_FORMAT_BGRA8888:
+		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
+		break;
+	case DRM_FORMAT_RGBX8888:
+		format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
+		break;
+	case DRM_FORMAT_RGBA8888:
+		format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
+		break;
+	case DRM_FORMAT_XBGR8888:
+		format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
+		break;
+	case DRM_FORMAT_ABGR8888:
+		format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
+		break;
+#endif
+	default:
+		format = 0;
+		break;
+	}
+	if (format == 0) {
+		ret = -EINVAL;
+		DRM_ERROR("failed to find virtio gpu format for %d\n",
+			  mode_cmd.pixel_format);
+		goto fail;
+	}
+
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+	obj = virtio_gpu_alloc_object(dev, size, false, true);
+	if (IS_ERR(obj)) {
+		ret = PTR_ERR(obj);
+		goto fail;
+	}
+
+	ret = virtio_gpu_resource_id_get(vgdev, &resid);
+	if (ret)
+		goto fail;
+
+	ret = virtio_gpu_cmd_create_resource(vgdev, resid, format,
+					  mode_cmd.width, mode_cmd.height);
+	if (ret)
+		goto fail;
+
+	ret = virtio_gpu_vmap_fb(vgdev, obj);
+	if (ret) {
+		DRM_ERROR("failed to vmap fb %d\n", ret);
+		goto fail;
+	}
+
+	/* attach the object to the resource */
+	ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL);
+	if (ret)
+		goto fail;
+
+	info = framebuffer_alloc(0, device);
+	if (!info) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	info->par = helper;
+
+	ret = virtio_gpu_framebuffer_init(dev, &vfbdev->vgfb,
+				       &mode_cmd, &obj->gem_base);
+	if (ret)
+		goto fail;
+
+	fb = &vfbdev->vgfb.base;
+
+	vfbdev->helper.fb = fb;
+	vfbdev->helper.fbdev = info;
+
+	strcpy(info->fix.id, "virtiodrmfb");
+	info->flags = FBINFO_DEFAULT;
+	info->fbops = &virtio_gpufb_ops;
+	info->pixmap.flags = FB_PIXMAP_SYSTEM;
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	info->screen_base = obj->vmap;
+	info->screen_size = obj->gem_base.size;
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+	drm_fb_helper_fill_var(info, &vfbdev->helper,
+			       sizes->fb_width, sizes->fb_height);
+
+	info->fix.mmio_start = 0;
+	info->fix.mmio_len = 0;
+
+	return 0;
+fail:
+	return ret;
+}
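
The endianness switch above is easier to follow with a concrete byte-order example.  A minimal sketch (plain C, not part of the patch) of why the little-endian branch maps DRM_FORMAT_XRGB8888 to VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM: the DRM fourcc formats describe a packed little-endian 32-bit value, while the virtio format names, as the mapping implies, list components in memory byte order.

/* Illustrative only: dump the memory byte order of an XRGB8888 pixel. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t xrgb = 0x00112233;	/* X=0x00 R=0x11 G=0x22 B=0x33 */
	uint8_t *bytes = (uint8_t *)&xrgb;

	/* On a little-endian host this prints "33 22 11 00", i.e. the
	 * B8 G8 R8 X8 component order the device sees in memory. */
	printf("%02x %02x %02x %02x\n",
	       bytes[0], bytes[1], bytes[2], bytes[3]);
	return 0;
}
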
+
+static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
+				    struct virtio_gpu_fbdev *vgfbdev)
+{
+	struct fb_info *info;
+	struct virtio_gpu_framebuffer *vgfb = &vgfbdev->vgfb;
+
+	if (vgfbdev->helper.fbdev) {
+		info = vgfbdev->helper.fbdev;
+
+		unregister_framebuffer(info);
+		framebuffer_release(info);
+	}
+	if (vgfb->obj)
+		vgfb->obj = NULL;
+	drm_fb_helper_fini(&vgfbdev->helper);
+	drm_framebuffer_cleanup(&vgfb->base);
+
+	return 0;
+}
+static struct drm_fb_helper_funcs virtio_gpu_fb_helper_funcs = {
+	.fb_probe = virtio_gpufb_create,
+};
+
+int virtio_gpu_fbdev_init(struct virtio_gpu_device *vgdev)
+{
+	struct virtio_gpu_fbdev *vgfbdev;
+	int bpp_sel = 32; /* TODO: parameter from somewhere? */
+	int ret;
+
+	vgfbdev = kzalloc(sizeof(struct virtio_gpu_fbdev), GFP_KERNEL);
+	if (!vgfbdev)
+		return -ENOMEM;
+
+	vgfbdev->vgdev = vgdev;
+	vgdev->vgfbdev = vgfbdev;
+	INIT_DELAYED_WORK(&vgfbdev->work, virtio_gpu_fb_dirty_work);
+
+	drm_fb_helper_prepare(vgdev->ddev, &vgfbdev->helper,
+			      &virtio_gpu_fb_helper_funcs);
+	ret = drm_fb_helper_init(vgdev->ddev, &vgfbdev->helper,
+				 vgdev->num_scanouts,
+				 VIRTIO_GPUFB_CONN_LIMIT);
+	if (ret) {
+		kfree(vgfbdev);
+		return ret;
+	}
+
+	drm_fb_helper_single_add_all_connectors(&vgfbdev->helper);
+	drm_fb_helper_initial_config(&vgfbdev->helper, bpp_sel);
+	return 0;
+}
+
+void virtio_gpu_fbdev_fini(struct virtio_gpu_device *vgdev)
+{
+	if (!vgdev->vgfbdev)
+		return;
+
+	virtio_gpu_fbdev_destroy(vgdev->ddev, vgdev->vgfbdev);
+	kfree(vgdev->vgfbdev);
+	vgdev->vgfbdev = NULL;
+}
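
The fbcon acceleration hooks above never flush directly; they merge damage into one pending rectangle under dirty_lock and let the delayed worker transfer the union.  A standalone model of that coalescing (plain C, no locking; illustrative only):

/* Model of the damage coalescing in virtio_gpu_dirty_update(): each
 * report grows one pending rectangle, a later flush sends the union. */
#include <limits.h>
#include <stdio.h>

struct dirty { int x1, y1, x2, y2; };

static void dirty_reset(struct dirty *d)
{
	d->x1 = d->y1 = INT_MAX;
	d->x2 = d->y2 = 0;
}

static void dirty_add(struct dirty *d, int x, int y, int w, int h)
{
	if (x < d->x1) d->x1 = x;
	if (y < d->y1) d->y1 = y;
	if (x + w - 1 > d->x2) d->x2 = x + w - 1;
	if (y + h - 1 > d->y2) d->y2 = y + h - 1;
}

int main(void)
{
	struct dirty d;

	dirty_reset(&d);
	dirty_add(&d, 10, 10, 20, 20);	/* first fillrect */
	dirty_add(&d, 50, 5, 10, 10);	/* second one elsewhere */
	/* one transfer+flush covers both: prints "flush 10,5 50x25" */
	printf("flush %d,%d %dx%d\n", d.x1, d.y1,
	       d.x2 - d.x1 + 1, d.y2 - d.y1 + 1);
	return 0;
}
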
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
new file mode 100644
index 0000000..552aa49
--- /dev/null
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -0,0 +1,95 @@ 
+#include <drm/drmP.h>
+#include "virtgpu_drv.h"
+
+static const char *virtio_get_driver_name(struct fence *f)
+{
+	return "virtio_gpu";
+}
+
+static const char *virtio_get_timeline_name(struct fence *f)
+{
+	return "controlq";
+}
+
+static bool virtio_enable_signaling(struct fence *f)
+{
+	return true;
+}
+
+static bool virtio_signaled(struct fence *f)
+{
+	struct virtio_gpu_fence *fence = to_virtio_fence(f);
+
+	return atomic64_read(&fence->drv->last_seq) >= fence->seq;
+}
+
+static void virtio_fence_value_str(struct fence *f, char *str, int size)
+{
+	struct virtio_gpu_fence *fence = to_virtio_fence(f);
+
+	snprintf(str, size, "%llu", fence->seq);
+}
+
+static void virtio_timeline_value_str(struct fence *f, char *str, int size)
+{
+	struct virtio_gpu_fence *fence = to_virtio_fence(f);
+
+	snprintf(str, size, "%lu", atomic64_read(&fence->drv->last_seq));
+}
+
+static const struct fence_ops virtio_fence_ops = {
+	.get_driver_name     = virtio_get_driver_name,
+	.get_timeline_name   = virtio_get_timeline_name,
+	.enable_signaling    = virtio_enable_signaling,
+	.signaled            = virtio_signaled,
+	.wait                = fence_default_wait,
+	.fence_value_str     = virtio_fence_value_str,
+	.timeline_value_str  = virtio_timeline_value_str,
+};
+
+int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
+			  struct virtio_gpu_ctrl_hdr *cmd_hdr,
+			  struct virtio_gpu_fence **fence)
+{
+	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
+	unsigned long irq_flags;
+
+	*fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_KERNEL);
+	if ((*fence) == NULL)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&drv->lock, irq_flags);
+	(*fence)->drv = drv;
+	(*fence)->seq = ++drv->sync_seq;
+	fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
+		   0, (*fence)->seq);
+	fence_get(&(*fence)->f);
+	list_add_tail(&(*fence)->node, &drv->fences);
+	spin_unlock_irqrestore(&drv->lock, irq_flags);
+
+	cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
+	cmd_hdr->fence_id = cpu_to_le64((*fence)->seq);
+	return 0;
+}
+
+void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
+				    u64 last_seq)
+{
+	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
+	struct virtio_gpu_fence *fence, *tmp;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&drv->lock, irq_flags);
+	atomic64_set(&vgdev->fence_drv.last_seq, last_seq);
+	list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
+		if (last_seq < fence->seq)
+			continue;
+		fence_signal_locked(&fence->f);
+		list_del(&fence->node);
+		fence_put(&fence->f);
+	}
+	spin_unlock_irqrestore(&drv->lock, irq_flags);
+}
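
The fence implementation is a plain monotonic-seqno scheme: emit attaches the next sequence number to the command header, the host reports the highest completed number back, and everything at or below it gets signalled.  A single-threaded model (plain C; illustrative only, with no locking or wraparound handling, since the driver relies on a u64 never wrapping):

/* Model of the seqno fencing in this file. */
#include <stdint.h>
#include <stdio.h>

static uint64_t sync_seq;	/* last sequence handed out */
static uint64_t last_seq;	/* last sequence completed by the host */

static uint64_t fence_emit(void)
{
	return ++sync_seq;	/* goes into cmd_hdr->fence_id */
}

static int fence_signaled(uint64_t seq)
{
	return last_seq >= seq;	/* mirrors virtio_signaled() */
}

int main(void)
{
	uint64_t a = fence_emit();	/* seq 1 */
	uint64_t b = fence_emit();	/* seq 2 */

	last_seq = 1;			/* host finished command 1 */
	printf("a: %d, b: %d\n", fence_signaled(a), fence_signaled(b));
	return 0;			/* prints "a: 1, b: 0" */
}
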
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
new file mode 100644
index 0000000..8bc0a24
--- /dev/null
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -0,0 +1,120 @@ 
+#include <drm/drmP.h>
+#include "virtgpu_drv.h"
+
+void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj)
+{
+	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(gem_obj);
+
+	if (obj)
+		virtio_gpu_object_unref(&obj);
+}
+
+struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
+						  size_t size, bool kernel,
+						  bool pinned)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_object *obj;
+	int ret;
+
+	ret = virtio_gpu_object_create(vgdev, size, kernel, pinned, &obj);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return obj;
+}
+
+int virtio_gpu_gem_create(struct drm_file *file,
+			  struct drm_device *dev,
+			  uint64_t size,
+			  struct drm_gem_object **obj_p,
+			  uint32_t *handle_p)
+{
+	struct virtio_gpu_object *obj;
+	int ret;
+	u32 handle;
+
+	obj = virtio_gpu_alloc_object(dev, size, false, false);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	ret = drm_gem_handle_create(file, &obj->gem_base, &handle);
+	if (ret) {
+		drm_gem_object_release(&obj->gem_base);
+		return ret;
+	}
+
+	*obj_p = &obj->gem_base;
+
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(&obj->gem_base);
+
+	*handle_p = handle;
+	return 0;
+}
+
+int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
+				struct drm_device *dev,
+				struct drm_mode_create_dumb *args)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct drm_gem_object *gobj;
+	struct virtio_gpu_object *obj;
+	int ret;
+	uint32_t pitch;
+	uint32_t resid;
+
+	pitch = args->width * ((args->bpp + 7) / 8);
+	args->size = pitch * args->height;
+	args->size = ALIGN(args->size, PAGE_SIZE);
+
+	ret = virtio_gpu_gem_create(file_priv, dev, args->size, &gobj,
+				 &args->handle);
+	if (ret)
+		goto fail;
+
+	ret = virtio_gpu_resource_id_get(vgdev, &resid);
+	if (ret)
+		goto fail;
+
+	ret = virtio_gpu_cmd_create_resource(vgdev, resid,
+					     VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM,
+					     args->width, args->height);
+	if (ret)
+		goto fail;
+
+	/* attach the object to the resource */
+	obj = gem_to_virtio_gpu_obj(gobj);
+	ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL);
+	if (ret)
+		goto fail;
+
+	obj->dumb = true;
+	args->pitch = pitch;
+	return ret;
+fail:
+	return ret;
+}
+
+int virtio_gpu_mode_dumb_destroy(struct drm_file *file_priv,
+				 struct drm_device *dev,
+				 uint32_t handle)
+{
+	return drm_gem_handle_delete(file_priv, handle);
+}
+
+int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
+			      struct drm_device *dev,
+			      uint32_t handle, uint64_t *offset_p)
+{
+	struct drm_gem_object *gobj;
+	struct virtio_gpu_object *obj;
+	BUG_ON(!offset_p);
+	gobj = drm_gem_object_lookup(dev, file_priv, handle);
+	if (gobj == NULL)
+		return -ENOENT;
+	obj = gem_to_virtio_gpu_obj(gobj);
+	*offset_p = virtio_gpu_object_mmap_offset(obj);
+	drm_gem_object_unreference_unlocked(gobj);
+	return 0;
+}
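
The dumb-buffer sizing above rounds the per-line pitch up to whole bytes and the total size up to a page.  A worked example (plain C, assuming a 4 KiB page size; illustrative only):

/* 1024x768 at 32 bpp: pitch = 1024 * 4 = 4096, size = 3 MiB exactly. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint32_t width = 1024, height = 768, bpp = 32;
	uint32_t pitch = width * ((bpp + 7) / 8);
	uint32_t size = ALIGN_UP(pitch * height, PAGE_SIZE);

	printf("pitch=%u size=%u\n", pitch, size);	/* 4096 3145728 */
	return 0;
}
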
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
new file mode 100644
index 0000000..45c4beb
--- /dev/null
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -0,0 +1,125 @@ 
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <drm/drmP.h>
+#include "virtgpu_drv.h"
+
+static void virtio_gpu_config_changed_work_func(struct work_struct *work)
+{
+	struct virtio_gpu_device *vgdev =
+		container_of(work, struct virtio_gpu_device,
+			     config_changed_work);
+	u32 events_read, events_clear = 0;
+
+	/* read the config space */
+	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
+		     events_read, &events_read);
+	if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
+		virtio_gpu_cmd_get_display_info(vgdev);
+		drm_helper_hpd_irq_event(vgdev->ddev);
+		events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
+	}
+	virtio_cwrite(vgdev->vdev, struct virtio_gpu_config,
+		      events_clear, &events_clear);
+}
+
+static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
+			       void (*work_func)(struct work_struct *work))
+{
+	spin_lock_init(&vgvq->qlock);
+	init_waitqueue_head(&vgvq->ack_queue);
+	INIT_WORK(&vgvq->dequeue_work, work_func);
+}
+
+int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	static vq_callback_t *callbacks[] = {
+		virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
+	};
+	static const char *names[] = { "control", "cursor" };
+
+	struct virtio_gpu_device *vgdev;
+	/* this will expand later */
+	struct virtqueue *vqs[2];
+	u32 num_scanouts;
+	int ret;
+
+	if (!virtio_has_feature(dev->virtdev, VIRTIO_F_VERSION_1))
+		return -ENODEV;
+
+	vgdev = kzalloc(sizeof(struct virtio_gpu_device), GFP_KERNEL);
+	if (!vgdev)
+		return -ENOMEM;
+
+	vgdev->ddev = dev;
+	dev->dev_private = vgdev;
+	vgdev->vdev = dev->virtdev;
+	vgdev->dev = dev->dev;
+
+	spin_lock_init(&vgdev->display_info_lock);
+	spin_lock_init(&vgdev->ctx_id_idr_lock);
+	idr_init(&vgdev->ctx_id_idr);
+	spin_lock_init(&vgdev->resource_idr_lock);
+	idr_init(&vgdev->resource_idr);
+	init_waitqueue_head(&vgdev->resp_wq);
+	virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
+	virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);
+
+	spin_lock_init(&vgdev->fence_drv.lock);
+	INIT_LIST_HEAD(&vgdev->fence_drv.fences);
+	INIT_WORK(&vgdev->config_changed_work,
+		  virtio_gpu_config_changed_work_func);
+
+	ret = vgdev->vdev->config->find_vqs(vgdev->vdev, 2, vqs,
+					    callbacks, names);
+	if (ret) {
+		DRM_ERROR("failed to find virt queues\n");
+		goto err_vqs;
+	}
+	vgdev->ctrlq.vq = vqs[0];
+	vgdev->cursorq.vq = vqs[1];
+
+	ret = virtio_gpu_ttm_init(vgdev);
+	if (ret) {
+		DRM_ERROR("failed to init ttm %d\n", ret);
+		goto err_ttm;
+	}
+
+	/* get display info */
+	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
+		     num_scanouts, &num_scanouts);
+	vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
+				    VIRTIO_GPU_MAX_SCANOUTS);
+	if (!vgdev->num_scanouts) {
+		DRM_ERROR("num_scanouts is zero\n");
+		ret = -EINVAL;
+		goto err_scanouts;
+	}
+
+	ret = virtio_gpu_modeset_init(vgdev);
+	if (ret)
+		goto err_modeset;
+
+	virtio_device_ready(vgdev->vdev);
+	virtio_gpu_cmd_get_display_info(vgdev);
+	return 0;
+
+err_modeset:
+err_scanouts:
+	virtio_gpu_ttm_fini(vgdev);
+err_ttm:
+	vgdev->vdev->config->del_vqs(vgdev->vdev);
+err_vqs:
+	kfree(vgdev);
+	return ret;
+}
+
+int virtio_gpu_driver_unload(struct drm_device *dev)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+
+	virtio_gpu_modeset_fini(vgdev);
+	virtio_gpu_ttm_fini(vgdev);
+	vgdev->vdev->config->del_vqs(vgdev->vdev);
+	kfree(vgdev);
+	return 0;
+}
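
The config-changed worker uses a simple read-and-acknowledge protocol on the device config space: events_read is a bitmask the device sets, and the driver acknowledges by writing the handled bits to events_clear.  A minimal model with the virtio config accessors replaced by a plain variable (illustrative only):

/* Model of the events_read/events_clear handshake. */
#include <stdint.h>
#include <stdio.h>

#define EVENT_DISPLAY (1u << 0)

static uint32_t events_read;	/* set by the "device" */

static void handle_config_change(void)
{
	uint32_t clear = 0;

	if (events_read & EVENT_DISPLAY) {	/* virtio_cread() here */
		printf("refresh display info\n");
		clear |= EVENT_DISPLAY;
	}
	events_read &= ~clear;	/* device reacts to the events_clear write */
}

int main(void)
{
	events_read |= EVENT_DISPLAY;	/* host hotplugged an output */
	handle_config_change();
	printf("pending after ack: 0x%x\n", events_read);
	return 0;
}
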
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
new file mode 100644
index 0000000..0d98ae4
--- /dev/null
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -0,0 +1,174 @@ 
+#include "virtgpu_drv.h"
+
+static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+{
+	struct virtio_gpu_object *bo;
+	struct virtio_gpu_device *vgdev;
+
+	bo = container_of(tbo, struct virtio_gpu_object, tbo);
+	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
+
+	if (bo->hw_res_handle)
+		virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
+	if (bo->pages)
+		virtio_gpu_object_free_sg_table(bo);
+	drm_gem_object_release(&bo->gem_base);
+	kfree(bo);
+}
+
+bool virtio_gpu_ttm_bo_is_virtio_gpu_object(struct ttm_buffer_object *bo)
+{
+	if (bo->destroy == &virtio_gpu_ttm_bo_destroy)
+		return true;
+	return false;
+}
+
+static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo,
+					  bool pinned)
+{
+	u32 c = 1;
+	u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
+
+	vgbo->placement.placement = &vgbo->placement_code;
+	vgbo->placement.busy_placement = &vgbo->placement_code;
+	vgbo->placement_code.fpfn = 0;
+	vgbo->placement_code.lpfn = 0;
+	vgbo->placement_code.flags =
+		TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT | pflag;
+	vgbo->placement.num_placement = c;
+	vgbo->placement.num_busy_placement = c;
+
+}
+
+int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
+			     unsigned long size, bool kernel, bool pinned,
+			     struct virtio_gpu_object **bo_ptr)
+{
+	struct virtio_gpu_object *bo;
+	enum ttm_bo_type type;
+	size_t acc_size;
+	int r;
+
+	if (kernel)
+		type = ttm_bo_type_kernel;
+	else
+		type = ttm_bo_type_device;
+	*bo_ptr = NULL;
+
+	acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, size,
+				       sizeof(struct virtio_gpu_object));
+
+	bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
+	if (bo == NULL)
+		return -ENOMEM;
+	size = roundup(size, PAGE_SIZE);
+	r = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
+	if (unlikely(r)) {
+		kfree(bo);
+		return r;
+	}
+	bo->dumb = false;
+
+	virtio_gpu_init_ttm_placement(bo, pinned);
+	r = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, size, type,
+			&bo->placement, 0, !kernel, NULL, acc_size,
+			NULL, NULL, &virtio_gpu_ttm_bo_destroy);
+	if (unlikely(r != 0)) {
+		if (r != -ERESTARTSYS)
+			dev_err(vgdev->dev,
+				"object_init %d failed for (%lu)\n", r,
+				size);
+		return r;
+	}
+	*bo_ptr = bo;
+	return 0;
+}
+
+int virtio_gpu_object_kmap(struct virtio_gpu_object *bo, void **ptr)
+{
+	bool is_iomem;
+	int r;
+
+	if (bo->vmap) {
+		if (ptr)
+			*ptr = bo->vmap;
+		return 0;
+	}
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+	if (r)
+		return r;
+	bo->vmap = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+	if (ptr)
+		*ptr = bo->vmap;
+	return 0;
+}
+
+#if 0
+void virtio_gpu_object_force_delete(struct virtio_gpu_device *vgdev)
+{
+	struct virtio_gpu_object *bo, *n;
+
+	dev_err(vgdev->dev, "Userspace still has active objects!\n");
+	list_for_each_entry_safe(bo, n, &vgdev->gem.objects, list) {
+		mutex_lock(&vgdev->ddev->struct_mutex);
+		dev_err(vgdev->dev, "%p %p %lu %lu force free\n",
+			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
+			*((unsigned long *)&bo->gem_base.refcount));
+		spin_lock(&vgdev->gem.lock);
+		list_del_init(&bo->list);
+		spin_unlock(&vgdev->gem.lock);
+		/* this should unref the ttm bo */
+		drm_gem_object_unreference(&bo->gem_base);
+		mutex_unlock(&vgdev->ddev->struct_mutex);
+	}
+}
+#endif
+
+int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
+				   struct virtio_gpu_object *bo)
+{
+	int ret;
+	struct page **pages = bo->tbo.ttm->pages;
+	int nr_pages = bo->tbo.num_pages;
+
+	/* nothing to do if the table is already there (swapping unhandled) */
+	if (bo->pages)
+		return 0;
+
+	if (bo->tbo.ttm->state == tt_unpopulated)
+		bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm);
+	bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!bo->pages)
+		goto out;
+
+	ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
+					nr_pages << PAGE_SHIFT, GFP_KERNEL);
+	if (ret)
+		goto out;
+	return 0;
+out:
+	kfree(bo->pages);
+	bo->pages = NULL;
+	return -ENOMEM;
+}
+
+void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo)
+{
+	sg_free_table(bo->pages);
+	kfree(bo->pages);
+	bo->pages = NULL;
+}
+
+int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait)
+{
+	int r;
+
+	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
+	if (unlikely(r != 0))
+		return r;
+	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+	ttm_bo_unreserve(&bo->tbo);
+	return r;
+}
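
virtio_gpu_object_get_sg_table() caches a scatter table of the BO's backing pages; the attach path later flattens it into guest-physical {addr, length} entries for RESOURCE_ATTACH_BACKING.  A standalone sketch of that flattening, with the sg machinery replaced by a plain array and made-up addresses (illustrative only):

/* Build the mem-entry array the device walks; addresses are fake. */
#include <stdint.h>
#include <stdio.h>

struct mem_entry { uint64_t addr; uint32_t length; uint32_t padding; };

int main(void)
{
	uint64_t pages[] = { 0x100000, 0x101000, 0x5000 };	/* hypothetical */
	struct mem_entry ents[3];
	unsigned int i, n = sizeof(pages) / sizeof(pages[0]);

	for (i = 0; i < n; i++) {
		ents[i].addr = pages[i];	/* cpu_to_le64() in the driver */
		ents[i].length = 4096;		/* one page per entry */
		ents[i].padding = 0;
	}
	for (i = 0; i < n; i++)
		printf("entry %u: 0x%llx +%u\n", i,
		       (unsigned long long)ents[i].addr, ents[i].length);
	return 0;
}
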
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
new file mode 100644
index 0000000..a6f22e0
--- /dev/null
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -0,0 +1,451 @@ 
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_page_alloc.h>
+#include <ttm/ttm_module.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "virtgpu_drv.h"
+
+#include <linux/delay.h>
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+static struct
+virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
+{
+	struct virtio_gpu_mman *mman;
+	struct virtio_gpu_device *vgdev;
+
+	mman = container_of(bdev, struct virtio_gpu_mman, bdev);
+	vgdev = container_of(mman, struct virtio_gpu_device, mman);
+	return vgdev;
+}
+
+static int virtio_gpu_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+	return ttm_mem_global_init(ref->object);
+}
+
+static void virtio_gpu_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+	ttm_mem_global_release(ref->object);
+}
+
+static int virtio_gpu_ttm_global_init(struct virtio_gpu_device *vgdev)
+{
+	struct drm_global_reference *global_ref;
+	int r;
+
+	vgdev->mman.mem_global_referenced = false;
+	global_ref = &vgdev->mman.mem_global_ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+	global_ref->size = sizeof(struct ttm_mem_global);
+	global_ref->init = &virtio_gpu_ttm_mem_global_init;
+	global_ref->release = &virtio_gpu_ttm_mem_global_release;
+
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM memory accounting "
+			  "subsystem.\n");
+		return r;
+	}
+
+	vgdev->mman.bo_global_ref.mem_glob =
+		vgdev->mman.mem_global_ref.object;
+	global_ref = &vgdev->mman.bo_global_ref.ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_BO;
+	global_ref->size = sizeof(struct ttm_bo_global);
+	global_ref->init = &ttm_bo_global_init;
+	global_ref->release = &ttm_bo_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+		drm_global_item_unref(&vgdev->mman.mem_global_ref);
+		return r;
+	}
+
+	vgdev->mman.mem_global_referenced = true;
+	return 0;
+}
+
+static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev)
+{
+	if (vgdev->mman.mem_global_referenced) {
+		drm_global_item_unref(&vgdev->mman.bo_global_ref.ref);
+		drm_global_item_unref(&vgdev->mman.mem_global_ref);
+		vgdev->mman.mem_global_referenced = false;
+	}
+}
+
+static struct vm_operations_struct virtio_gpu_ttm_vm_ops;
+static const struct vm_operations_struct *ttm_vm_ops;
+
+static int virtio_gpu_ttm_fault(struct vm_area_struct *vma,
+				struct vm_fault *vmf)
+{
+	struct ttm_buffer_object *bo;
+	struct virtio_gpu_device *vgdev;
+	int r;
+
+	bo = (struct ttm_buffer_object *)vma->vm_private_data;
+	if (bo == NULL)
+		return VM_FAULT_NOPAGE;
+	vgdev = virtio_gpu_get_vgdev(bo->bdev);
+	r = ttm_vm_ops->fault(vma, vmf);
+	return r;
+}
+
+int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv;
+	struct virtio_gpu_device *vgdev;
+	int r;
+
+	file_priv = filp->private_data;
+	vgdev = file_priv->minor->dev->dev_private;
+	if (vgdev == NULL) {
+		DRM_ERROR(
+		 "filp->private_data->minor->dev->dev_private == NULL\n");
+		return -EINVAL;
+	}
+	r = ttm_bo_mmap(filp, vma, &vgdev->mman.bdev);
+	if (unlikely(r != 0))
+		return r;
+	if (unlikely(ttm_vm_ops == NULL)) {
+		ttm_vm_ops = vma->vm_ops;
+		virtio_gpu_ttm_vm_ops = *ttm_vm_ops;
+		virtio_gpu_ttm_vm_ops.fault = &virtio_gpu_ttm_fault;
+	}
+	vma->vm_ops = &virtio_gpu_ttm_vm_ops;
+	return 0;
+}
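
The mmap path above uses a classic ops-table interposition: copy TTM's vm_ops once, override the fault hook, and keep the original pointer for chaining.  A generic model of the pattern (plain C; illustrative only):

/* Clone an ops table, override one hook, chain to the original. */
#include <stdio.h>

struct ops { void (*fault)(void); };

static void base_fault(void) { printf("ttm fault\n"); }

static struct ops base_ops = { .fault = base_fault };
static struct ops wrapped_ops;
static const struct ops *orig_ops;

static void my_fault(void)
{
	printf("driver pre-fault hook\n");
	orig_ops->fault();	/* chain to the original handler */
}

int main(void)
{
	if (!orig_ops) {	/* one-time setup, as in virtio_gpu_mmap() */
		orig_ops = &base_ops;
		wrapped_ops = base_ops;
		wrapped_ops.fault = my_fault;
	}
	wrapped_ops.fault();
	return 0;
}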
+
+static int virtio_gpu_invalidate_caches(struct ttm_bo_device *bdev,
+					uint32_t flags)
+{
+	return 0;
+}
+
+static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
+			       struct ttm_buffer_object *bo,
+			       const struct ttm_place *place,
+			       struct ttm_mem_reg *mem)
+{
+	mem->mm_node = (void *)1;
+	return 0;
+}
+
+static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
+				struct ttm_mem_reg *mem)
+{
+	mem->mm_node = NULL;
+}
+
+static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
+			   unsigned long p_size)
+{
+	return 0;
+}
+
+static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
+{
+	return 0;
+}
+
+static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
+			     const char *prefix)
+{
+}
+
+static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = {
+	ttm_bo_man_init,
+	ttm_bo_man_takedown,
+	ttm_bo_man_get_node,
+	ttm_bo_man_put_node,
+	ttm_bo_man_debug
+};
+
+static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+				    struct ttm_mem_type_manager *man)
+{
+	struct virtio_gpu_device *vgdev;
+
+	vgdev = virtio_gpu_get_vgdev(bdev);
+
+	switch (type) {
+	case TTM_PL_SYSTEM:
+		/* System memory */
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case TTM_PL_TT:
+		man->func = &virtio_gpu_bo_manager_func;
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	default:
+		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void virtio_gpu_evict_flags(struct ttm_buffer_object *bo,
+				struct ttm_placement *placement)
+{
+	static struct ttm_place placements = {
+		.fpfn  = 0,
+		.lpfn  = 0,
+		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM,
+	};
+
+	placement->placement = &placements;
+	placement->busy_placement = &placements;
+	placement->num_placement = 1;
+	placement->num_busy_placement = 1;
+}
+
+static int virtio_gpu_verify_access(struct ttm_buffer_object *bo,
+				    struct file *filp)
+{
+	return 0;
+}
+
+static int virtio_gpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+					 struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+	case TTM_PL_TT:
+		/* system memory */
+		return 0;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void virtio_gpu_ttm_io_mem_free(struct ttm_bo_device *bdev,
+				       struct ttm_mem_reg *mem)
+{
+}
+
+/*
+ * TTM backend functions.
+ */
+struct virtio_gpu_ttm_tt {
+	struct ttm_dma_tt		ttm;
+	struct virtio_gpu_device		*vgdev;
+	u64				offset;
+};
+
+static int virtio_gpu_ttm_backend_bind(struct ttm_tt *ttm,
+				       struct ttm_mem_reg *bo_mem)
+{
+	struct virtio_gpu_ttm_tt *gtt = (void *)ttm;
+
+	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+	if (!ttm->num_pages) {
+		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+		     ttm->num_pages, bo_mem, ttm);
+	}
+	/* Not implemented */
+	return 0;
+}
+
+static int virtio_gpu_ttm_backend_unbind(struct ttm_tt *ttm)
+{
+	/* Not implemented */
+	return 0;
+}
+
+static void virtio_gpu_ttm_backend_destroy(struct ttm_tt *ttm)
+{
+	struct virtio_gpu_ttm_tt *gtt = (void *)ttm;
+
+	ttm_dma_tt_fini(&gtt->ttm);
+	kfree(gtt);
+}
+
+static struct ttm_backend_func virtio_gpu_backend_func = {
+	.bind = &virtio_gpu_ttm_backend_bind,
+	.unbind = &virtio_gpu_ttm_backend_unbind,
+	.destroy = &virtio_gpu_ttm_backend_destroy,
+};
+
+static int virtio_gpu_ttm_tt_populate(struct ttm_tt *ttm)
+{
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
+	return ttm_pool_populate(ttm);
+}
+
+static void virtio_gpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+	ttm_pool_unpopulate(ttm);
+}
+
+static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_bo_device *bdev,
+					       unsigned long size,
+					       uint32_t page_flags,
+					       struct page *dummy_read_page)
+{
+	struct virtio_gpu_device *vgdev;
+	struct virtio_gpu_ttm_tt *gtt;
+
+	vgdev = virtio_gpu_get_vgdev(bdev);
+	gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
+	if (gtt == NULL)
+		return NULL;
+	gtt->ttm.ttm.func = &virtio_gpu_backend_func;
+	gtt->vgdev = vgdev;
+	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags,
+			    dummy_read_page)) {
+		kfree(gtt);
+		return NULL;
+	}
+	return &gtt->ttm.ttm;
+}
+
+static void virtio_gpu_move_null(struct ttm_buffer_object *bo,
+				 struct ttm_mem_reg *new_mem)
+{
+	struct ttm_mem_reg *old_mem = &bo->mem;
+
+	BUG_ON(old_mem->mm_node != NULL);
+	*old_mem = *new_mem;
+	new_mem->mm_node = NULL;
+}
+
+static int virtio_gpu_bo_move(struct ttm_buffer_object *bo,
+			      bool evict, bool interruptible,
+			      bool no_wait_gpu,
+			      struct ttm_mem_reg *new_mem)
+{
+	virtio_gpu_move_null(bo, new_mem);
+	return 0;
+}
+
+static void virtio_gpu_bo_move_notify(struct ttm_buffer_object *tbo,
+				      struct ttm_mem_reg *new_mem)
+{
+	struct virtio_gpu_object *bo;
+	struct virtio_gpu_device *vgdev;
+
+	bo = container_of(tbo, struct virtio_gpu_object, tbo);
+	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
+
+	if (!new_mem || (new_mem->placement & TTM_PL_FLAG_SYSTEM)) {
+		if (bo->hw_res_handle)
+			virtio_gpu_cmd_resource_inval_backing(vgdev,
+							   bo->hw_res_handle);
+
+	} else if (new_mem->placement & TTM_PL_FLAG_TT) {
+		if (bo->hw_res_handle) {
+			virtio_gpu_object_attach(vgdev, bo, bo->hw_res_handle,
+					      NULL);
+		}
+	}
+}
+
+static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo)
+{
+	struct virtio_gpu_object *bo;
+	struct virtio_gpu_device *vgdev;
+
+	bo = container_of(tbo, struct virtio_gpu_object, tbo);
+	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
+
+	if (bo->pages)
+		virtio_gpu_object_free_sg_table(bo);
+}
+
+static struct ttm_bo_driver virtio_gpu_bo_driver = {
+	.ttm_tt_create = &virtio_gpu_ttm_tt_create,
+	.ttm_tt_populate = &virtio_gpu_ttm_tt_populate,
+	.ttm_tt_unpopulate = &virtio_gpu_ttm_tt_unpopulate,
+	.invalidate_caches = &virtio_gpu_invalidate_caches,
+	.init_mem_type = &virtio_gpu_init_mem_type,
+	.evict_flags = &virtio_gpu_evict_flags,
+	.move = &virtio_gpu_bo_move,
+	.verify_access = &virtio_gpu_verify_access,
+	.io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
+	.io_mem_free = &virtio_gpu_ttm_io_mem_free,
+	.move_notify = &virtio_gpu_bo_move_notify,
+	.swap_notify = &virtio_gpu_bo_swap_notify,
+};
+
+int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
+{
+	int r;
+
+	r = virtio_gpu_ttm_global_init(vgdev);
+	if (r)
+		return r;
+	/* No other user of the address space, so set it to 0 */
+	r = ttm_bo_device_init(&vgdev->mman.bdev,
+			       vgdev->mman.bo_global_ref.ref.object,
+			       &virtio_gpu_bo_driver,
+			       vgdev->ddev->anon_inode->i_mapping,
+			       DRM_FILE_PAGE_OFFSET, 0);
+	if (r) {
+		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
+		return r;
+	}
+
+	r = ttm_bo_init_mm(&vgdev->mman.bdev, TTM_PL_TT, 0);
+	if (r) {
+		DRM_ERROR("Failed initializing GTT heap.\n");
+		return r;
+	}
+	return 0;
+}
+
+void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev)
+{
+	ttm_bo_device_release(&vgdev->mman.bdev);
+	virtio_gpu_ttm_global_fini(vgdev);
+	DRM_INFO("virtio_gpu: ttm finalized\n");
+}
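
Note that the TT memory manager registered here is deliberately a stub: buffers stay in guest system memory, so there is no device address space to carve up, and get_node only has to return a non-NULL token for TTM to treat the "allocation" as successful.  A minimal model of such a null allocator (illustrative only):

/* Allocation always succeeds with a dummy token; nothing is managed. */
#include <stdio.h>

static void *node_get(void)	{ return (void *)1; }	/* dummy, non-NULL */
static void node_put(void **n)	{ *n = NULL; }

int main(void)
{
	void *node = node_get();

	printf("allocated: %s\n", node ? "yes" : "no");
	node_put(&node);
	return 0;
}
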
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
new file mode 100644
index 0000000..a98cda8
--- /dev/null
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -0,0 +1,540 @@ 
+#include <drm/drmP.h>
+#include "virtgpu_drv.h"
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ring.h>
+
+
+int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
+{
+	int handle;
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&vgdev->resource_idr_lock);
+	handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
+	spin_unlock(&vgdev->resource_idr_lock);
+	idr_preload_end();
+	*resid = handle;
+	return 0;
+}
+
+void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
+{
+	spin_lock(&vgdev->resource_idr_lock);
+	idr_remove(&vgdev->resource_idr, id);
+	spin_unlock(&vgdev->resource_idr_lock);
+}
+
+void virtio_gpu_ctrl_ack(struct virtqueue *vq)
+{
+	struct drm_device *dev = vq->vdev->priv;
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	schedule_work(&vgdev->ctrlq.dequeue_work);
+}
+
+void virtio_gpu_cursor_ack(struct virtqueue *vq)
+{
+	struct drm_device *dev = vq->vdev->priv;
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	schedule_work(&vgdev->cursorq.dequeue_work);
+}
+
+static struct virtio_gpu_vbuffer*
+virtio_gpu_allocate_vbuf(struct virtio_gpu_device *vgdev,
+			 int size, int resp_size,
+			 virtio_gpu_resp_cb resp_cb)
+{
+	struct virtio_gpu_vbuffer *vbuf;
+
+	vbuf = kzalloc(sizeof(*vbuf) + size + resp_size, GFP_KERNEL);
+	if (!vbuf)
+		goto fail;
+
+	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
+	vbuf->size = size;
+
+	vbuf->resp_cb = resp_cb;
+	if (resp_size)
+		vbuf->resp_buf = (void *)vbuf->buf + size;
+	else
+		vbuf->resp_buf = NULL;
+	vbuf->resp_size = resp_size;
+
+	return vbuf;
+fail:
+	kfree(vbuf);
+	return ERR_PTR(-ENOMEM);
+}
+
+static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
+				  struct virtio_gpu_vbuffer **vbuffer_p,
+				  int size)
+{
+	struct virtio_gpu_vbuffer *vbuf;
+
+	vbuf = virtio_gpu_allocate_vbuf(vgdev, size,
+				     sizeof(struct virtio_gpu_ctrl_hdr), NULL);
+	if (IS_ERR(vbuf)) {
+		*vbuffer_p = NULL;
+		return ERR_CAST(vbuf);
+	}
+	*vbuffer_p = vbuf;
+	return vbuf->buf;
+}
+
+static struct virtio_gpu_update_cursor*
+virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
+			struct virtio_gpu_vbuffer **vbuffer_p)
+{
+	struct virtio_gpu_vbuffer *vbuf;
+
+	vbuf = virtio_gpu_allocate_vbuf
+		(vgdev, sizeof(struct virtio_gpu_update_cursor), 0, NULL);
+	if (IS_ERR(vbuf)) {
+		*vbuffer_p = NULL;
+		return ERR_CAST(vbuf);
+	}
+	*vbuffer_p = vbuf;
+	return (struct virtio_gpu_update_cursor *)vbuf->buf;
+}
+
+static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
+				       virtio_gpu_resp_cb cb,
+				       struct virtio_gpu_vbuffer **vbuffer_p,
+				       int cmd_size, int resp_size)
+{
+	struct virtio_gpu_vbuffer *vbuf;
+
+	vbuf = virtio_gpu_allocate_vbuf(vgdev, cmd_size, resp_size, cb);
+	if (IS_ERR(vbuf)) {
+		*vbuffer_p = NULL;
+		return ERR_CAST(vbuf);
+	}
+	*vbuffer_p = vbuf;
+	return (struct virtio_gpu_command *)vbuf->buf;
+}
+
+static void free_vbuf(struct virtio_gpu_device *vgdev,
+		      struct virtio_gpu_vbuffer *vbuf)
+{
+	kfree(vbuf->data_buf);
+	kfree(vbuf);
+}
+
+static int reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
+{
+	struct virtio_gpu_vbuffer *vbuf;
+	unsigned int len;
+	int freed = 0;
+	while ((vbuf = virtqueue_get_buf(vq, &len))) {
+		list_add_tail(&vbuf->destroy_list, reclaim_list);
+		freed++;
+	}
+	return freed;
+}
+
+void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
+{
+	struct virtio_gpu_device *vgdev =
+		container_of(work, struct virtio_gpu_device,
+			     ctrlq.dequeue_work);
+	int ret;
+	struct list_head reclaim_list;
+	struct virtio_gpu_vbuffer *entry, *tmp;
+	struct virtio_gpu_ctrl_hdr *resp;
+	u64 fence_id = 0;
+
+	INIT_LIST_HEAD(&reclaim_list);
+	spin_lock(&vgdev->ctrlq.qlock);
+	do {
+		virtqueue_disable_cb(vgdev->ctrlq.vq);
+		ret = reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
+		if (ret == 0)
+			DRM_DEBUG("cleaned 0 buffers wierd\n");
+
+	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
+	spin_unlock(&vgdev->ctrlq.qlock);
+
+	list_for_each_entry_safe(entry, tmp, &reclaim_list, destroy_list) {
+		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
+		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
+			DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
+		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
+			u64 f = le64_to_cpu(resp->fence_id);
+
+			if (fence_id > f) {
+				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
+					  __func__, fence_id, f);
+			} else {
+				fence_id = f;
+			}
+		}
+		if (entry->resp_cb)
+			entry->resp_cb(vgdev, entry);
+
+		list_del(&entry->destroy_list);
+		free_vbuf(vgdev, entry);
+	}
+	wake_up(&vgdev->ctrlq.ack_queue);
+
+	if (fence_id)
+		virtio_gpu_fence_event_process(vgdev, fence_id);
+}
+
+void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
+{
+	struct virtio_gpu_device *vgdev =
+		container_of(work, struct virtio_gpu_device,
+			     cursorq.dequeue_work);
+	struct virtqueue *vq = vgdev->cursorq.vq;
+	struct list_head reclaim_list;
+	struct virtio_gpu_vbuffer *entry, *tmp;
+	unsigned int len;
+	int ret;
+
+	INIT_LIST_HEAD(&reclaim_list);
+	spin_lock(&vgdev->cursorq.qlock);
+	do {
+		virtqueue_disable_cb(vgdev->cursorq.vq);
+		ret = reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
+		if (ret == 0)
+			DRM_DEBUG("cleaned 0 buffers wierd\n");
+		while (virtqueue_get_buf(vq, &len))
+			/* nothing */;
+	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
+	spin_unlock(&vgdev->cursorq.qlock);
+
+	list_for_each_entry_safe(entry, tmp, &reclaim_list, destroy_list) {
+		list_del(&entry->destroy_list);
+		free_vbuf(vgdev, entry);
+	}
+	wake_up(&vgdev->cursorq.ack_queue);
+}
+
+static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
+					struct virtio_gpu_vbuffer *vbuf)
+{
+	struct virtqueue *vq = vgdev->ctrlq.vq;
+	struct scatterlist *sgs[3], vcmd, vout, vresp;
+	int outcnt = 0, incnt = 0;
+	int ret;
+
+	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
+	sgs[outcnt+incnt] = &vcmd;
+	outcnt++;
+
+	if (vbuf->data_buf) {
+		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
+		sgs[outcnt+incnt] = &vout;
+		outcnt++;
+	}
+
+	if (vbuf->resp_buf) {
+		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
+		sgs[outcnt+incnt] = &vresp;
+		incnt++;
+	}
+
+	spin_lock(&vgdev->ctrlq.qlock);
+retry:
+	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
+	if (ret == -ENOSPC) {
+		spin_unlock(&vgdev->ctrlq.qlock);
+		wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
+		spin_lock(&vgdev->ctrlq.qlock);
+		goto retry;
+	} else {
+		virtqueue_kick(vq);
+	}
+	spin_unlock(&vgdev->ctrlq.qlock);
+
+	if (!ret)
+		ret = vq->num_free;
+	return ret;
+}
+
+static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
+				   struct virtio_gpu_vbuffer *vbuf)
+{
+	struct virtqueue *vq = vgdev->cursorq.vq;
+	struct scatterlist *sgs[1], ccmd;
+	int ret;
+	int outcnt;
+
+	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
+	sgs[0] = &ccmd;
+	outcnt = 1;
+
+	spin_lock(&vgdev->cursorq.qlock);
+retry:
+	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
+	if (ret == -ENOSPC) {
+		spin_unlock(&vgdev->cursorq.qlock);
+		wait_event(vgdev->cursorq.ack_queue, vq->num_free);
+		spin_lock(&vgdev->cursorq.qlock);
+		goto retry;
+	} else {
+		virtqueue_kick(vq);
+	}
+
+	spin_unlock(&vgdev->cursorq.qlock);
+
+	if (!ret)
+		ret = vq->num_free;
+	return ret;
+}
+
+/* Only create gem objects for userspace and long-lived objects;
+   short-lived queue buffers could just use dma-allocated pages? */
+
+/* create a basic resource */
+int virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
+				   uint32_t resource_id,
+				   uint32_t format,
+				   uint32_t width,
+				   uint32_t height)
+{
+	struct virtio_gpu_resource_create_2d *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
+	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->format = cpu_to_le32(format);
+	cmd_p->width = cpu_to_le32(width);
+	cmd_p->height = cpu_to_le32(height);
+
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+
+	return 0;
+}
+
+int virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
+				  uint32_t resource_id)
+{
+	struct virtio_gpu_resource_unref *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
+	cmd_p->resource_id = cpu_to_le32(resource_id);
+
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	return 0;
+}
+
+int virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
+					  uint32_t resource_id)
+{
+	struct virtio_gpu_resource_detach_backing *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
+	cmd_p->resource_id = cpu_to_le32(resource_id);
+
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+
+	return 0;
+}
+
+int virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
+			       uint32_t scanout_id, uint32_t resource_id,
+			       uint32_t width, uint32_t height,
+			       uint32_t x, uint32_t y)
+{
+	struct virtio_gpu_set_scanout *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
+	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->scanout_id = cpu_to_le32(scanout_id);
+	cmd_p->r.width = cpu_to_le32(width);
+	cmd_p->r.height = cpu_to_le32(height);
+	cmd_p->r.x = cpu_to_le32(x);
+	cmd_p->r.y = cpu_to_le32(y);
+
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	return 0;
+}
+
+int virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
+				  uint32_t resource_id,
+				  uint32_t x, uint32_t y,
+				  uint32_t width, uint32_t height)
+{
+	struct virtio_gpu_resource_flush *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
+	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->r.width = cpu_to_le32(width);
+	cmd_p->r.height = cpu_to_le32(height);
+	cmd_p->r.x = cpu_to_le32(x);
+	cmd_p->r.y = cpu_to_le32(y);
+
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+
+	return 0;
+}
+
+int virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+				       uint32_t resource_id, uint64_t offset,
+				       __le32 width, __le32 height,
+				       __le32 x, __le32 y,
+				       struct virtio_gpu_fence **fence)
+{
+	struct virtio_gpu_transfer_to_host_2d *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
+	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->offset = cpu_to_le64(offset);
+	cmd_p->r.width = width;
+	cmd_p->r.height = height;
+	cmd_p->r.x = x;
+	cmd_p->r.y = y;
+
+	if (fence)
+		virtio_gpu_fence_emit(vgdev, &cmd_p->hdr, fence);
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+
+	return 0;
+}
+
+static int
+virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
+				       uint32_t resource_id,
+				       struct virtio_gpu_mem_entry *ents,
+				       uint32_t nents,
+				       struct virtio_gpu_fence **fence)
+{
+	struct virtio_gpu_resource_attach_backing *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
+	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->nr_entries = cpu_to_le32(nents);
+
+	vbuf->data_buf = ents;
+	vbuf->data_size = sizeof(*ents) * nents;
+
+	if (fence)
+		virtio_gpu_fence_emit(vgdev, &cmd_p->hdr, fence);
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+
+	return 0;
+}
+
+static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
+					       struct virtio_gpu_vbuffer *vbuf)
+{
+	struct virtio_gpu_resp_display_info *resp =
+		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
+	int i;
+
+	spin_lock(&vgdev->display_info_lock);
+	for (i = 0; i < vgdev->num_scanouts; i++) {
+		vgdev->outputs[i].info = resp->pmodes[i];
+		if (resp->pmodes[i].enabled) {
+			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
+				  le32_to_cpu(resp->pmodes[i].r.width),
+				  le32_to_cpu(resp->pmodes[i].r.height),
+				  le32_to_cpu(resp->pmodes[i].r.x),
+				  le32_to_cpu(resp->pmodes[i].r.y));
+		} else {
+			DRM_DEBUG("output %d: disabled", i);
+		}
+	}
+
+	spin_unlock(&vgdev->display_info_lock);
+	wake_up(&vgdev->resp_wq);
+
+	if (!drm_helper_hpd_irq_event(vgdev->ddev))
+		drm_kms_helper_hotplug_event(vgdev->ddev);
+}
+
+int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
+{
+	struct virtio_gpu_ctrl_hdr *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd_resp
+		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
+		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	return 0;
+}
+
+int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
+			     struct virtio_gpu_object *obj,
+			     uint32_t resource_id,
+			     struct virtio_gpu_fence **fence)
+{
+	struct virtio_gpu_mem_entry *ents;
+	struct scatterlist *sg;
+	int si;
+
+	if (!obj->pages) {
+		int ret;
+		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
+		if (ret)
+			return ret;
+	}
+
+	/* gets freed when the ring has consumed it */
+	ents = kmalloc_array(obj->pages->nents,
+			     sizeof(struct virtio_gpu_mem_entry),
+			     GFP_KERNEL);
+	if (!ents) {
+		DRM_ERROR("failed to allocate ent list\n");
+		return -ENOMEM;
+	}
+
+	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
+		ents[si].addr = cpu_to_le64(sg_phys(sg));
+		ents[si].length = cpu_to_le32(sg->length);
+		ents[si].padding = 0;
+	}
+
+	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
+					       ents, obj->pages->nents,
+					       fence);
+	obj->hw_res_handle = resource_id;
+	return 0;
+}
+
+void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
+			    struct virtio_gpu_output *output)
+{
+	struct virtio_gpu_vbuffer *vbuf;
+	struct virtio_gpu_update_cursor *cur_p;
+
+	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
+	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
+	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
+	virtio_gpu_queue_cursor(vgdev, vbuf);
+}
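
Both submission paths share the same backpressure pattern: try to add the buffer, and on -ENOSPC drop the lock, sleep until the interrupt handler reclaims completed buffers (it wakes ack_queue), then retry and kick.  A simplified single-threaded model with the virtqueue replaced by a counter (illustrative only):

/* Model of the -ENOSPC retry loop in virtio_gpu_queue_ctrl_buffer(). */
#include <errno.h>
#include <stdio.h>

static int num_free = 1;	/* stand-in for vq->num_free */

static int ring_add(void)
{
	if (num_free == 0)
		return -ENOSPC;
	num_free--;
	return 0;
}

static void wait_for_completions(void)	/* wait_event() in the driver */
{
	num_free++;	/* "interrupt handler" reclaimed a buffer */
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		while (ring_add() == -ENOSPC)
			wait_for_completions();
		printf("queued cmd %d\n", i);	/* then virtqueue_kick() */
	}
	return 0;
}
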
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index e894eb2..a3167fa 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -510,7 +510,7 @@  static int virtio_pci_probe(struct pci_dev *pci_dev,
 		goto err_enable_device;
 
 	rc = pci_request_regions(pci_dev, "virtio-pci");
-	if (rc)
+	if (rc && ((pci_dev->class >> 8) != PCI_CLASS_DISPLAY_VGA))
 		goto err_request_regions;
 
 	if (force_legacy) {
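
The relaxed pci_request_regions() failure path matters for the VGA-compatible variant, presumably because a firmware framebuffer driver may still own the VGA memory bar at probe time, so a region conflict must not abort the probe.  The class test works because the 24-bit PCI class register packs base class, subclass and prog-if; shifting right by 8 leaves base class plus subclass, which is what PCI_CLASS_DISPLAY_VGA (0x0300) encodes.  A quick check of the arithmetic (plain C; illustrative only):

/* 0x030000 = base class 0x03 (display), subclass 0x00 (VGA), prog-if 0. */
#include <stdint.h>
#include <stdio.h>

#define PCI_CLASS_DISPLAY_VGA 0x0300

int main(void)
{
	uint32_t class_reg = 0x030000;	/* as read from config space */

	printf("is vga: %d\n", (class_reg >> 8) == PCI_CLASS_DISPLAY_VGA);
	return 0;
}
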
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index e928625..a1067c4 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -799,6 +799,7 @@  struct drm_device {
 #endif
 
 	struct platform_device *platformdev; /**< Platform device struture */
+	struct virtio_device *virtdev;
 
 	struct drm_sg_mem *sg;	/**< Scatter gather memory */
 	unsigned int num_crtcs;                  /**< Number of CRTCs on this device */
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 68ceb97..9707e5d 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -429,6 +429,7 @@  header-y += virtio_balloon.h
 header-y += virtio_blk.h
 header-y += virtio_config.h
 header-y += virtio_console.h
+header-y += virtio_gpu.h
 header-y += virtio_ids.h
 header-y += virtio_net.h
 header-y += virtio_pci.h
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
new file mode 100644
index 0000000..a1bda52
--- /dev/null
+++ b/include/uapi/linux/virtio_gpu.h
@@ -0,0 +1,203 @@ 
+/*
+ * Virtio GPU Device
+ *
+ * Copyright Red Hat, Inc. 2013-2014
+ *
+ * Authors:
+ *     Dave Airlie <airlied@redhat.com>
+ *     Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * This header is BSD licensed so anyone can use the definitions
+ * to implement compatible drivers/servers:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef VIRTIO_GPU_HW_H
+#define VIRTIO_GPU_HW_H
+
+enum virtio_gpu_ctrl_type {
+	VIRTIO_GPU_UNDEFINED = 0,
+
+	/* 2d commands */
+	VIRTIO_GPU_CMD_GET_DISPLAY_INFO = 0x0100,
+	VIRTIO_GPU_CMD_RESOURCE_CREATE_2D,
+	VIRTIO_GPU_CMD_RESOURCE_UNREF,
+	VIRTIO_GPU_CMD_SET_SCANOUT,
+	VIRTIO_GPU_CMD_RESOURCE_FLUSH,
+	VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D,
+	VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING,
+	VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING,
+
+	/* cursor commands */
+	VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300,
+	VIRTIO_GPU_CMD_MOVE_CURSOR,
+
+	/* success responses */
+	VIRTIO_GPU_RESP_OK_NODATA = 0x1100,
+	VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
+
+	/* error responses */
+	VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
+	VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY,
+	VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID,
+	VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID,
+	VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID,
+	VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER,
+};
+
+#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
+
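+/*
+ * Prefixed to every request and response.  When VIRTIO_GPU_FLAG_FENCE is
+ * set in flags, the device finishes the command before responding and
+ * echoes fence_id back in the response header.
+ */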
+struct virtio_gpu_ctrl_hdr {
+	__le32 type;
+	__le32 flags;
+	__le64 fence_id;
+	__le32 ctx_id;
+	__le32 padding;
+};
+
+/* data passed in the cursor vq */
+
+struct virtio_gpu_cursor_pos {
+	__le32 scanout_id;
+	__le32 x, y;
+	__le32 padding;
+};
+
+/* VIRTIO_GPU_CMD_UPDATE_CURSOR, VIRTIO_GPU_CMD_MOVE_CURSOR */
+struct virtio_gpu_update_cursor {
+	struct virtio_gpu_ctrl_hdr hdr;
+	struct virtio_gpu_cursor_pos pos;  /* update & move */
+	__le32 resource_id;           /* update only */
+	__le32 hot_x;                 /* update only */
+	__le32 hot_y;                 /* update only */
+	__le32 padding;
+};
+
+/* data passed in the control vq, 2d related */
+
+struct virtio_gpu_rect {
+	__le32 x, y;
+	__le32 width;
+	__le32 height;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_UNREF */
+struct virtio_gpu_resource_unref {
+	struct virtio_gpu_ctrl_hdr hdr;
+	__le32 resource_id;
+	__le32 padding;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: create a 2d resource with a format */
+struct virtio_gpu_resource_create_2d {
+	struct virtio_gpu_ctrl_hdr hdr;
+	__le32 resource_id;
+	__le32 format;
+	__le32 width;
+	__le32 height;
+};
+
+/* VIRTIO_GPU_CMD_SET_SCANOUT */
+struct virtio_gpu_set_scanout {
+	struct virtio_gpu_ctrl_hdr hdr;
+	struct virtio_gpu_rect r;
+	__le32 scanout_id;
+	__le32 resource_id;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_FLUSH */
+struct virtio_gpu_resource_flush {
+	struct virtio_gpu_ctrl_hdr hdr;
+	struct virtio_gpu_rect r;
+	__le32 resource_id;
+	__le32 padding;
+};
+
+/* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: simple transfer to_host */
+struct virtio_gpu_transfer_to_host_2d {
+	struct virtio_gpu_ctrl_hdr hdr;
+	struct virtio_gpu_rect r;
+	__le64 offset;
+	__le32 resource_id;
+	__le32 padding;
+};
+
+struct virtio_gpu_mem_entry {
+	__le64 addr;
+	__le32 length;
+	__le32 padding;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING */
+struct virtio_gpu_resource_attach_backing {
+	struct virtio_gpu_ctrl_hdr hdr;
+	__le32 resource_id;
+	__le32 nr_entries;
+};
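+/* the request above is followed by nr_entries struct virtio_gpu_mem_entry */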
+
+/* VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING */
+struct virtio_gpu_resource_detach_backing {
+	struct virtio_gpu_ctrl_hdr hdr;
+	__le32 resource_id;
+	__le32 padding;
+};
+
+/* VIRTIO_GPU_RESP_OK_DISPLAY_INFO */
+#define VIRTIO_GPU_MAX_SCANOUTS 16
+struct virtio_gpu_resp_display_info {
+	struct virtio_gpu_ctrl_hdr hdr;
+	struct virtio_gpu_display_one {
+		struct virtio_gpu_rect r;
+		__le32 enabled;
+		__le32 flags;
+	} pmodes[VIRTIO_GPU_MAX_SCANOUTS];
+};
+
+#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)
+
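+/*
+ * Device configuration space.  The device sets bits in events_read (e.g.
+ * VIRTIO_GPU_EVENT_DISPLAY on a display change); the driver acknowledges
+ * by writing the same bits to events_clear.
+ */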
+struct virtio_gpu_config {
+	__u32 events_read;
+	__u32 events_clear;
+	__u32 num_scanouts;
+	__u32 reserved;
+};
+
+/* simple formats for fbcon/X use */
+enum virtio_gpu_formats {
+	VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM  = 1,
+	VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM  = 2,
+	VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM  = 3,
+	VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM  = 4,
+
+	VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM  = 67,
+	VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM  = 68,
+
+	VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM  = 121,
+	VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM  = 134,
+};
+
+#endif /* VIRTIO_GPU_HW_H */
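
Every multi-byte field in the structures above is little-endian on the wire, so a driver fills them via cpu_to_le32()/cpu_to_le64(). As a minimal sketch (the helper name is illustrative, queueing onto the control virtqueue is elided), this is how a RESOURCE_CREATE_2D command for a 1024x768 XRGB surface would be built; resource_id is a driver-chosen nonzero handle:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/virtio_gpu.h>

/* Sketch: fence/ctx fields stay zero for a plain unfenced 2D command. */
static void fill_resource_create_2d(struct virtio_gpu_resource_create_2d *cmd,
				    u32 resource_id)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.type    = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd->resource_id = cpu_to_le32(resource_id);
	cmd->format      = cpu_to_le32(VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM);
	cmd->width       = cpu_to_le32(1024);
	cmd->height      = cpu_to_le32(768);
}

On success the device answers with a bare virtio_gpu_ctrl_hdr of type VIRTIO_GPU_RESP_OK_NODATA; otherwise one of the VIRTIO_GPU_RESP_ERR_* types above is returned.
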
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 284fc3a..14d77f7 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -39,5 +39,5 @@ 
 #define VIRTIO_ID_9P		9 /* 9p virtio transport */
 #define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */
 #define VIRTIO_ID_CAIF	       12 /* Virtio caif */
-
+#define VIRTIO_ID_GPU          16 /* virtio GPU */
 #endif /* _LINUX_VIRTIO_IDS_H */