
[v4,4/5] drm/virtio: Import prime buffers from other devices as guest blobs

Message ID 20241125073313.3361612-5-vivek.kasireddy@intel.com (mailing list archive)
State New
Series drm/virtio: Import scanout buffers from other devices

Commit Message

Kasireddy, Vivek Nov. 25, 2024, 7:31 a.m. UTC
By importing scanout buffers from other devices, we should be able
to use the virtio-gpu driver in KMS-only mode. Note that we attach
dynamically and register a move_notify() callback so that we can
let the VMM know of any location changes associated with the backing
store of the imported object by sending a detach_backing cmd.

Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Cc: Rob Clark <robdclark@gmail.com>
Cc: Gurchetan Singh <gurchetansingh@chromium.org>
Cc: Chia-I Wu <olvaffe@gmail.com>
Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
---
 drivers/gpu/drm/virtio/virtgpu_prime.c | 56 +++++++++++++++++++++++++-
 1 file changed, 55 insertions(+), 1 deletion(-)
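For context, a rough userspace sketch of how an imported buffer could be put on screen through the virtio-gpu KMS node once this import path is in place (plain libdrm; the dma-buf fd, DRM_FORMAT_XRGB8888 and the geometry/stride values are placeholder assumptions, not something this series prescribes):

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include <drm_fourcc.h>

/* dmabuf_fd: buffer exported by another device (e.g. a dGPU render node).
 * virtio_fd: the virtio-gpu DRM node, used for KMS only.
 */
static int scanout_imported_buffer(int virtio_fd, int dmabuf_fd,
				   uint32_t width, uint32_t height,
				   uint32_t stride, uint32_t *fb_id)
{
	uint32_t handles[4] = { 0 }, pitches[4] = { 0 }, offsets[4] = { 0 };
	int ret;

	/* Ends up in virtgpu_gem_prime_import() in the guest kernel, which
	 * creates a guest blob resource backed by the imported dma-buf.
	 */
	ret = drmPrimeFDToHandle(virtio_fd, dmabuf_fd, &handles[0]);
	if (ret)
		return ret;

	pitches[0] = stride;

	/* The resulting framebuffer can then be attached to a plane/CRTC. */
	return drmModeAddFB2(virtio_fd, width, height, DRM_FORMAT_XRGB8888,
			     handles, pitches, offsets, fb_id, 0);
}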

Comments

Dmitry Osipenko Nov. 25, 2024, 4:34 p.m. UTC | #1
On 11/25/24 10:31, Vivek Kasireddy wrote:
>  struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
>  						struct dma_buf *buf)
>  {
> +	struct virtio_gpu_device *vgdev = dev->dev_private;
> +	struct dma_buf_attachment *attach;
> +	struct virtio_gpu_object *bo;
>  	struct drm_gem_object *obj;
> +	int ret;
>  
>  	if (buf->ops == &virtgpu_dmabuf_ops.ops) {
>  		obj = buf->priv;
> @@ -275,7 +304,32 @@ struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
>  		}
>  	}
>  
> -	return drm_gem_prime_import(dev, buf);
> +	if (!vgdev->has_resource_blob || vgdev->has_virgl_3d)
> +		return drm_gem_prime_import(dev, buf);
> +
> +	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
> +	if (!bo)
> +		return ERR_PTR(-ENOMEM);
> +
> +	obj = &bo->base.base;
> +	obj->funcs = &virtgpu_gem_dma_buf_funcs;
> +	drm_gem_private_object_init(dev, obj, buf->size);
> +
> +	attach = dma_buf_dynamic_attach(buf, dev->dev,
> +					&virtgpu_dma_buf_attach_ops, obj);
> +	if (IS_ERR(attach)) {
> +		kfree(bo);
> +		return ERR_CAST(attach);
> +	}
> +
> +	obj->import_attach = attach;
> +	get_dma_buf(buf);
> +
> +	ret = virtgpu_dma_buf_init_obj(dev, bo, attach);
> +	if (ret < 0)
> +		return ERR_PTR(ret);

Perhaps for a future improvement: I think we can defer
virtgpu_dma_buf_init_obj() until the first use of the object in the case where
the exporter supports dynamic attachment. Otherwise, we're pinning the
object at import time, partially defeating the purpose of the dynamic
attachment, AFAICT. I.e., if the importer never uses the object, then there
is no need to bother the exporter with the pinning.
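
For illustration, a minimal sketch of what such a deferral could look like, reusing names from this patch (virtgpu_dma_buf_lazy_init is a hypothetical helper, not part of the series, and a real version would still have to follow the dma-buf reservation locking rules):

static int virtgpu_dma_buf_lazy_init(struct virtio_gpu_object *bo)
{
	struct drm_gem_object *obj = &bo->base.base;

	/* Hypothetical deferral point: the guest blob resource is only
	 * created on first use, so an object that is imported but never
	 * used does not force the exporter to pin its backing store.
	 */
	if (bo->created)
		return 0;

	return virtgpu_dma_buf_init_obj(obj->dev, bo, obj->import_attach);
}

The import path would then stop calling virtgpu_dma_buf_init_obj() directly, and the helper would be invoked from wherever the object is first used, e.g. the plane prepare path.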
Kasireddy, Vivek Nov. 26, 2024, 3:19 a.m. UTC | #2
Hi Dmitry,

> Subject: Re: [PATCH v4 4/5] drm/virtio: Import prime buffers from other
> devices as guest blobs
> 
> >  struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device
> *dev,
> >  						struct dma_buf *buf)
> >  {
> > +	struct virtio_gpu_device *vgdev = dev->dev_private;
> > +	struct dma_buf_attachment *attach;
> > +	struct virtio_gpu_object *bo;
> >  	struct drm_gem_object *obj;
> > +	int ret;
> >
> >  	if (buf->ops == &virtgpu_dmabuf_ops.ops) {
> >  		obj = buf->priv;
> > @@ -275,7 +304,32 @@ struct drm_gem_object
> *virtgpu_gem_prime_import(struct drm_device *dev,
> >  		}
> >  	}
> >
> > -	return drm_gem_prime_import(dev, buf);
> > +	if (!vgdev->has_resource_blob || vgdev->has_virgl_3d)
> > +		return drm_gem_prime_import(dev, buf);
> > +
> > +	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
> > +	if (!bo)
> > +		return ERR_PTR(-ENOMEM);
> > +
> > +	obj = &bo->base.base;
> > +	obj->funcs = &virtgpu_gem_dma_buf_funcs;
> > +	drm_gem_private_object_init(dev, obj, buf->size);
> > +
> > +	attach = dma_buf_dynamic_attach(buf, dev->dev,
> > +					&virtgpu_dma_buf_attach_ops, obj);
> > +	if (IS_ERR(attach)) {
> > +		kfree(bo);
> > +		return ERR_CAST(attach);
> > +	}
> > +
> > +	obj->import_attach = attach;
> > +	get_dma_buf(buf);
> > +
> > +	ret = virtgpu_dma_buf_init_obj(dev, bo, attach);
> > +	if (ret < 0)
> > +		return ERR_PTR(ret);
> 
> Perhaps for a future improvement: I think we can defer
> virtgpu_dma_buf_init_obj() until the first use of the object in the case where
I believe it might be possible to do that. I think I started out that way but
figured we would need the addresses/backing in order to create a guest
blob resource. Anyway, I'll try to test this idea again when I get a chance.

> the exporter supports dynamic attachment. Otherwise, we're pinning the
> object at import time, partially defeating the purpose of the dynamic
> attachment, AFAICT. I.e., if the importer never uses the object, then there
> is no need to bother the exporter with the pinning.
Yeah, I agree. It makes sense to defer the initialization until first usage.

Thanks,
Vivek
> 
> --
> Best regards,
> Dmitry

Patch

diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index d265825b6ae5..5031b800c521 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -258,10 +258,39 @@  static int virtgpu_dma_buf_init_obj(struct drm_device *dev,
 	return ret;
 }
 
+static const struct drm_gem_object_funcs virtgpu_gem_dma_buf_funcs = {
+	.free = virtgpu_dma_buf_free_obj,
+};
+
+static void virtgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
+{
+	struct drm_gem_object *obj = attach->importer_priv;
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+	if (bo->created) {
+		virtio_gpu_detach_object_fenced(bo);
+
+		if (bo->sgt)
+			dma_buf_unmap_attachment(attach, bo->sgt,
+						 DMA_BIDIRECTIONAL);
+
+		bo->sgt = NULL;
+	}
+}
+
+static const struct dma_buf_attach_ops virtgpu_dma_buf_attach_ops = {
+	.allow_peer2peer = true,
+	.move_notify = virtgpu_dma_buf_move_notify
+};
+
 struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
 						struct dma_buf *buf)
 {
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct dma_buf_attachment *attach;
+	struct virtio_gpu_object *bo;
 	struct drm_gem_object *obj;
+	int ret;
 
 	if (buf->ops == &virtgpu_dmabuf_ops.ops) {
 		obj = buf->priv;
@@ -275,7 +304,32 @@  struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
 		}
 	}
 
-	return drm_gem_prime_import(dev, buf);
+	if (!vgdev->has_resource_blob || vgdev->has_virgl_3d)
+		return drm_gem_prime_import(dev, buf);
+
+	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+	if (!bo)
+		return ERR_PTR(-ENOMEM);
+
+	obj = &bo->base.base;
+	obj->funcs = &virtgpu_gem_dma_buf_funcs;
+	drm_gem_private_object_init(dev, obj, buf->size);
+
+	attach = dma_buf_dynamic_attach(buf, dev->dev,
+					&virtgpu_dma_buf_attach_ops, obj);
+	if (IS_ERR(attach)) {
+		kfree(bo);
+		return ERR_CAST(attach);
+	}
+
+	obj->import_attach = attach;
+	get_dma_buf(buf);
+
+	ret = virtgpu_dma_buf_init_obj(dev, bo, attach);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	return obj;
 }
 
 struct drm_gem_object *virtgpu_gem_prime_import_sg_table(