
[RFC,4/7] drm/virtio: Import prime buffers from other devices as guest blobs

Message ID 20240328083615.2662516-5-vivek.kasireddy@intel.com (mailing list archive)
State New, archived
Series drm/virtio: Import scanout buffers from other devices

Commit Message

Kasireddy, Vivek March 28, 2024, 8:32 a.m. UTC
By importing scanout buffers from other devices, we should be able
to use the virtio-gpu driver in KMS-only mode. Note that we attach
dynamically and register a move_notify() callback so that we can
let the VMM know of any location changes in the backing store of
the imported object by sending a detach_backing cmd.

Cc: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
---
 drivers/gpu/drm/virtio/virtgpu_prime.c | 54 +++++++++++++++++++++++++-
 1 file changed, 53 insertions(+), 1 deletion(-)
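
For context, the move_notify() path is driven by the dma-buf core: when
an exporter relocates the backing store, it calls dma_buf_move_notify()
with the buffer's reservation lock held, which walks the attachments and
invokes each dynamic importer's move_notify(). A condensed sketch of that
core path (simplified from drivers/dma-buf/dma-buf.c; the exact code may
differ between kernel versions) shows the context in which the new
virtio-gpu callback runs:

void dma_buf_move_notify(struct dma_buf *dmabuf)
{
	struct dma_buf_attachment *attach;

	/* The exporter must hold the dma-buf's reservation lock. */
	dma_resv_assert_held(dmabuf->resv);

	/* Tell every dynamic importer that the backing store moved;
	 * for virtio-gpu this is where the detach_backing cmd gets
	 * sent to the VMM.
	 */
	list_for_each_entry(attach, &dmabuf->attachments, node)
		if (attach->importer_ops)
			attach->importer_ops->move_notify(attach);
}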

Comments

Daniel Vetter May 22, 2024, 7:28 a.m. UTC | #1
On Thu, Mar 28, 2024 at 01:32:57AM -0700, Vivek Kasireddy wrote:
> By importing scanout buffers from other devices, we should be able
> to use the virtio-gpu driver in KMS-only mode. Note that we attach
> dynamically and register a move_notify() callback so that we can
> let the VMM know of any location changes in the backing store of
> the imported object by sending a detach_backing cmd.
> 
> Cc: Gerd Hoffmann <kraxel@redhat.com>
> Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
> ---
>  drivers/gpu/drm/virtio/virtgpu_prime.c | 54 +++++++++++++++++++++++++-
>  1 file changed, 53 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
> index 1e87dbc9a897..c65dacc1b2b5 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_prime.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
> @@ -255,10 +255,36 @@ static int virtgpu_dma_buf_init_obj(struct drm_device *dev,
>  	return ret;
>  }
>  
> +static const struct drm_gem_object_funcs virtgpu_gem_dma_buf_funcs = {
> +	.free = virtgpu_dma_buf_free_obj,
> +};
> +
> +static void virtgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
> +{
> +	struct drm_gem_object *obj = attach->importer_priv;
> +	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
> +	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
> +
> +	if (bo->created) {
> +		virtio_gpu_cmd_resource_detach_backing(vgdev,
> +						       bo->hw_res_handle);
> +		bo->has_backing = false;
> +	}
> +}
> +
> +static const struct dma_buf_attach_ops virtgpu_dma_buf_attach_ops = {
> +	.allow_peer2peer = true,
> +	.move_notify = virtgpu_dma_buf_move_notify
> +};
> +
>  struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
>  						struct dma_buf *buf)
>  {
> +	struct virtio_gpu_device *vgdev = dev->dev_private;
> +	struct dma_buf_attachment *attach;
> +	struct virtio_gpu_object *bo;
>  	struct drm_gem_object *obj;
> +	int ret;
>  
>  	if (buf->ops == &virtgpu_dmabuf_ops.ops) {
>  		obj = buf->priv;
> @@ -272,7 +298,32 @@ struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
>  		}
>  	}
>  
> -	return drm_gem_prime_import(dev, buf);

I think overall this (entire series) makes sense, but it needs someone
with overall virtio understanding to make sure it all fits together
correctly. Just a refactor thought here: instead of open-coding this,
should we have a drm_gem_prime_dynamic_import()?

Similarly, in another patch, for dma_buf_pin: should that also live in
the gem helpers, so it automatically forwards to the dma_buf if the
object is imported?

Cheers, Sima
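
For illustration, here is a minimal sketch of what such a
drm_gem_prime_dynamic_import() helper might look like. This is
hypothetical (no such helper exists in the DRM core today); it only
factors out the dynamic-attach and refcounting boilerplate from the hunk
above, leaving the driver-specific object allocation and the virtio blob
init to the caller:

static struct drm_gem_object *
drm_gem_prime_dynamic_import(struct drm_device *dev, struct dma_buf *buf,
			     struct drm_gem_object *obj,
			     const struct dma_buf_attach_ops *ops)
{
	struct dma_buf_attachment *attach;

	/* The caller allocates the driver-specific GEM object and sets
	 * obj->funcs; only the common import plumbing happens here.
	 */
	drm_gem_private_object_init(dev, obj, buf->size);

	attach = dma_buf_dynamic_attach(buf, dev->dev, ops, obj);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	obj->import_attach = attach;
	get_dma_buf(buf);

	return obj;
}

The dma_buf_pin() question could be handled along the same lines: a GEM
helper that checks obj->import_attach and forwards to dma_buf_pin() /
dma_buf_unpin() (called with the reservation lock held) instead of the
driver's own pin hooks. Again, just a sketch of the suggestion, not
existing code.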

> +	if (!vgdev->has_resource_blob || vgdev->has_virgl_3d)
> +		return drm_gem_prime_import(dev, buf);
> +
> +	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
> +	if (!bo)
> +		return ERR_PTR(-ENOMEM);
> +
> +	obj = &bo->base.base;
> +	obj->funcs = &virtgpu_gem_dma_buf_funcs;
> +	drm_gem_private_object_init(dev, obj, buf->size);
> +
> +	attach = dma_buf_dynamic_attach(buf, dev->dev,
> +					&virtgpu_dma_buf_attach_ops, obj);
> +	if (IS_ERR(attach)) {
> +		kfree(bo);
> +		return ERR_CAST(attach);
> +	}
> +
> +	obj->import_attach = attach;
> +	get_dma_buf(buf);
> +
> +	ret = virtgpu_dma_buf_init_obj(dev, bo, attach);
> +	if (ret < 0)
> +		return ERR_PTR(ret);
> +
> +	return obj;
>  }
>  
>  struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
> @@ -281,3 +332,4 @@ struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
>  {
>  	return ERR_PTR(-ENODEV);
>  }
> +
> -- 
> 2.43.0
>

Patch

diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 1e87dbc9a897..c65dacc1b2b5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -255,10 +255,36 @@  static int virtgpu_dma_buf_init_obj(struct drm_device *dev,
 	return ret;
 }
 
+static const struct drm_gem_object_funcs virtgpu_gem_dma_buf_funcs = {
+	.free = virtgpu_dma_buf_free_obj,
+};
+
+static void virtgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
+{
+	struct drm_gem_object *obj = attach->importer_priv;
+	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+	if (bo->created) {
+		virtio_gpu_cmd_resource_detach_backing(vgdev,
+						       bo->hw_res_handle);
+		bo->has_backing = false;
+	}
+}
+
+static const struct dma_buf_attach_ops virtgpu_dma_buf_attach_ops = {
+	.allow_peer2peer = true,
+	.move_notify = virtgpu_dma_buf_move_notify
+};
+
 struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
 						struct dma_buf *buf)
 {
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct dma_buf_attachment *attach;
+	struct virtio_gpu_object *bo;
 	struct drm_gem_object *obj;
+	int ret;
 
 	if (buf->ops == &virtgpu_dmabuf_ops.ops) {
 		obj = buf->priv;
@@ -272,7 +298,32 @@  struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
 		}
 	}
 
-	return drm_gem_prime_import(dev, buf);
+	if (!vgdev->has_resource_blob || vgdev->has_virgl_3d)
+		return drm_gem_prime_import(dev, buf);
+
+	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+	if (!bo)
+		return ERR_PTR(-ENOMEM);
+
+	obj = &bo->base.base;
+	obj->funcs = &virtgpu_gem_dma_buf_funcs;
+	drm_gem_private_object_init(dev, obj, buf->size);
+
+	attach = dma_buf_dynamic_attach(buf, dev->dev,
+					&virtgpu_dma_buf_attach_ops, obj);
+	if (IS_ERR(attach)) {
+		kfree(bo);
+		return ERR_CAST(attach);
+	}
+
+	obj->import_attach = attach;
+	get_dma_buf(buf);
+
+	ret = virtgpu_dma_buf_init_obj(dev, bo, attach);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	return obj;
 }
 
 struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
@@ -281,3 +332,4 @@  struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
 {
 	return ERR_PTR(-ENODEV);
 }
+