diff mbox series

[v4,3/5] drm/virtio: Add helpers to initialize and free the imported object

Message ID 20241125073313.3361612-4-vivek.kasireddy@intel.com (mailing list archive)
State New
Headers show
Series drm/virtio: Import scanout buffers from other devices | expand

Commit Message

Kasireddy, Vivek Nov. 25, 2024, 7:31 a.m. UTC
The imported object can be considered a guest blob resource;
therefore, we use create_blob cmd while creating it. These helpers
are used in the next patch which does the actual import.

Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Cc: Rob Clark <robdclark@gmail.com>
Cc: Gurchetan Singh <gurchetansingh@chromium.org>
Cc: Chia-I Wu <olvaffe@gmail.com>
Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
---
 drivers/gpu/drm/virtio/virtgpu_object.c |  3 +
 drivers/gpu/drm/virtio/virtgpu_prime.c  | 73 +++++++++++++++++++++++++
 2 files changed, 76 insertions(+)

Comments

Dmitry Osipenko Nov. 25, 2024, 4:27 p.m. UTC | #1
On 11/25/24 10:31, Vivek Kasireddy wrote:
> +static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
> +{
> +	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
> +	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
> +	struct dma_buf_attachment *attach = obj->import_attach;
> +
> +	if (attach) {
> +		virtio_gpu_detach_object_fenced(bo);
> +
> +		if (bo->sgt)
> +			dma_buf_unmap_attachment(attach, bo->sgt,
> +						 DMA_BIDIRECTIONAL);

I've caught this problem, we're not holding resv lock:

[   28.550460] WARNING: CPU: 6 PID: 759 at
drivers/dma-buf/dma-buf.c:1228 dma_buf_unmap_attachment+0x84/0x90
...
[   28.554217]  ? __warn.cold+0xb7/0x150
[   28.554322]  ? dma_buf_unmap_attachment+0x84/0x90
[   28.554455]  ? report_bug+0xf7/0x140
[   28.554560]  ? handle_bug+0x4f/0x90
[   28.554661]  ? exc_invalid_op+0x13/0x60
[   28.554767]  ? asm_exc_invalid_op+0x16/0x20
[   28.554885]  ? dma_buf_unmap_attachment+0x84/0x90
[   28.555018]  ? dma_buf_unmap_attachment+0x80/0x90
[   28.555160]  virtgpu_dma_buf_free_obj+0x3b/0x80
[   28.555294]  drm_gem_object_release_handle+0x4d/0x60
[   28.555435]  ? drm_gem_object_handle_put_unlocked+0xe0/0xe0
[   28.555587]  idr_for_each+0x4b/0xb0
[   28.555691]  drm_gem_release+0x1f/0x30
[   28.555798]  drm_file_free+0x202/0x290
[   28.555905]  drm_release+0x5f/0xc0
[   28.556001]  __fput+0xf9/0x2b0
[   28.556093]  task_work_run+0x55/0x90
[   28.556219]  do_exit+0x313/0xaa0
[   28.556315]  ? lock_release+0xb6/0x260
[   28.556423]  do_group_exit+0x32/0xa0
[   28.556525]  __x64_sys_exit_group+0x14/0x20
[   28.556645]  x64_sys_call+0x714/0x720
[   28.556751]  do_syscall_64+0x54/0xf0
[   28.556854]  entry_SYSCALL_64_after_hwframe+0x4b/0x53

both virtio_gpu_detach_object_fenced() and dma_buf_unmap_attachment()
need to be called under resv lock.

Won't hurt adding dma_resv_assert_held() to
virtio_gpu_detach_object_fenced().
Dmitry Osipenko Nov. 25, 2024, 5:06 p.m. UTC | #2
On 11/25/24 19:27, Dmitry Osipenko wrote:
> On 11/25/24 10:31, Vivek Kasireddy wrote:
>> +static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
>> +{
>> +	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
>> +	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
>> +	struct dma_buf_attachment *attach = obj->import_attach;
>> +
>> +	if (attach) {
>> +		virtio_gpu_detach_object_fenced(bo);
>> +
>> +		if (bo->sgt)
>> +			dma_buf_unmap_attachment(attach, bo->sgt,
>> +						 DMA_BIDIRECTIONAL);
> 
> I've caught this problem, we're not holding resv lock:
> 
> [   28.550460] WARNING: CPU: 6 PID: 759 at
> drivers/dma-buf/dma-buf.c:1228 dma_buf_unmap_attachment+0x84/0x90
> ...
> [   28.554217]  ? __warn.cold+0xb7/0x150
> [   28.554322]  ? dma_buf_unmap_attachment+0x84/0x90
> [   28.554455]  ? report_bug+0xf7/0x140
> [   28.554560]  ? handle_bug+0x4f/0x90
> [   28.554661]  ? exc_invalid_op+0x13/0x60
> [   28.554767]  ? asm_exc_invalid_op+0x16/0x20
> [   28.554885]  ? dma_buf_unmap_attachment+0x84/0x90
> [   28.555018]  ? dma_buf_unmap_attachment+0x80/0x90
> [   28.555160]  virtgpu_dma_buf_free_obj+0x3b/0x80
> [   28.555294]  drm_gem_object_release_handle+0x4d/0x60
> [   28.555435]  ? drm_gem_object_handle_put_unlocked+0xe0/0xe0
> [   28.555587]  idr_for_each+0x4b/0xb0
> [   28.555691]  drm_gem_release+0x1f/0x30
> [   28.555798]  drm_file_free+0x202/0x290
> [   28.555905]  drm_release+0x5f/0xc0
> [   28.556001]  __fput+0xf9/0x2b0
> [   28.556093]  task_work_run+0x55/0x90
> [   28.556219]  do_exit+0x313/0xaa0
> [   28.556315]  ? lock_release+0xb6/0x260
> [   28.556423]  do_group_exit+0x32/0xa0
> [   28.556525]  __x64_sys_exit_group+0x14/0x20
> [   28.556645]  x64_sys_call+0x714/0x720
> [   28.556751]  do_syscall_64+0x54/0xf0
> [   28.556854]  entry_SYSCALL_64_after_hwframe+0x4b/0x53
> 
> both virtio_gpu_detach_object_fenced() and dma_buf_unmap_attachment()
> need to be called under resv lock.
> 
> Won't hurt adding dma_resv_assert_held() to
> virtio_gpu_detach_object_fenced().
> 

On the other hand, nobody uses the BO when it's being freed, hence locking
around virtio_gpu_detach_object_fenced() is not needed. It should be enough
to use dma_buf_unmap_attachment_unlocked() then.
diff mbox series

Patch

diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 53bb02c8a135..2b19bb8c6ec3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -80,6 +80,9 @@  void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
 		drm_gem_free_mmap_offset(&vram->base.base.base);
 		drm_gem_object_release(&vram->base.base.base);
 		kfree(vram);
+	} else {
+		drm_gem_object_release(&bo->base.base);
+		kfree(bo);
 	}
 }
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 2d644240a2b7..d265825b6ae5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -185,6 +185,79 @@  int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
 	return 0;
 }
 
+/*
+ * Free an imported (guest blob) object: tear down the dma-buf import,
+ * then release the host resource (if created) or the GEM object itself.
+ */
+static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
+{
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+	struct dma_buf_attachment *attach = obj->import_attach;
+
+	if (attach) {
+		/*
+		 * Cache the dmabuf pointer: dma_buf_detach() frees the
+		 * attachment, so attach->dmabuf must not be dereferenced
+		 * afterwards (the original code did, a use-after-free).
+		 */
+		struct dma_buf *dmabuf = attach->dmabuf;
+
+		/*
+		 * Both the fenced detach and dma_buf_unmap_attachment()
+		 * must run under the resv lock; the unlocked call trips
+		 * dma_resv_assert_held() in dma-buf core.
+		 */
+		dma_resv_lock(dmabuf->resv, NULL);
+
+		virtio_gpu_detach_object_fenced(bo);
+
+		if (bo->sgt)
+			dma_buf_unmap_attachment(attach, bo->sgt,
+						 DMA_BIDIRECTIONAL);
+
+		dma_resv_unlock(dmabuf->resv);
+
+		dma_buf_detach(dmabuf, attach);
+		dma_buf_put(dmabuf);
+	}
+
+	if (bo->created) {
+		/* Host resource exists: unref on the host, which will
+		 * eventually call back into virtio_gpu_cleanup_object().
+		 */
+		virtio_gpu_cmd_unref_resource(vgdev, bo);
+		virtio_gpu_notify(vgdev);
+		return;
+	}
+	virtio_gpu_cleanup_object(bo);
+}
+
+/*
+ * Initialize an imported BO as a guest blob resource: allocate a host
+ * resource id, pin + map the dma-buf to collect guest mem entries, and
+ * issue a RESOURCE_CREATE_BLOB for them.  On failure the object is torn
+ * down via virtgpu_dma_buf_free_obj() before returning a negative errno.
+ */
+static int virtgpu_dma_buf_init_obj(struct drm_device *dev,
+				    struct virtio_gpu_object *bo,
+				    struct dma_buf_attachment *attach)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_object_params params = { 0 };
+	struct dma_resv *resv = attach->dmabuf->resv;
+	struct virtio_gpu_mem_entry *ents = NULL;
+	unsigned int nents;
+	int ret;
+
+	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
+	if (ret) {
+		virtgpu_dma_buf_free_obj(&bo->base.base);
+		return ret;
+	}
+
+	/* Pinning and mapping the attachment require the resv lock. */
+	dma_resv_lock(resv, NULL);
+
+	ret = dma_buf_pin(attach);
+	if (ret)
+		goto err_pin;
+
+	/* Builds the mem-entry array (ents/nents) from the sg table. */
+	ret = virtgpu_dma_buf_import_sgt(&ents, &nents, bo, attach);
+	if (ret)
+		goto err_import;
+
+	/* Imported pages live in the guest, so create a guest blob. */
+	bo->guest_blob = true;
+	params.blob = true;
+	params.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
+	params.blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
+	params.size = attach->dmabuf->size;
+
+	virtio_gpu_cmd_resource_create_blob(vgdev, bo, &params,
+					    ents, nents);
+	/* NOTE(review): unpinned right after create — presumably the blob
+	 * cmd snapshots the entries so the pin is only needed while
+	 * mapping; confirm against the virtio-gpu spec.
+	 */
+	dma_buf_unpin(attach);
+	dma_resv_unlock(resv);
+
+	return 0;
+
+err_import:
+	dma_buf_unpin(attach);
+err_pin:
+	dma_resv_unlock(resv);
+	virtgpu_dma_buf_free_obj(&bo->base.base);
+	return ret;
+}
+
 struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
 						struct dma_buf *buf)
 {