[v3,2/2] drm/virtio: New fence for every plane update

Message ID 20241020230803.247419-2-dmitry.osipenko@collabora.com (mailing list archive)
State New, archived
Series [v3,1/2] drm/virtio: Use drm_gem_plane_helper_prepare_fb()

Commit Message

Dmitry Osipenko Oct. 20, 2024, 11:08 p.m. UTC
From: Dongwon Kim <dongwon.kim@intel.com>

Having the fence linked to the virtio_gpu_framebuffer in the plane update
sequence causes a conflict when several planes reference the same
framebuffer (e.g. an Xorg screen covering multiple displays configured in
extended mode) and those planes are updated concurrently. Allocate a fence
for every plane state instead of for the framebuffer.

Signed-off-by: Dongwon Kim <dongwon.kim@intel.com>
[dmitry.osipenko@collabora.com: rebase, fix up, edit commit message]
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
---
 drivers/gpu/drm/virtio/virtgpu_drv.h   |  7 ++++
 drivers/gpu/drm/virtio/virtgpu_plane.c | 58 +++++++++++++++++---------
 2 files changed, 46 insertions(+), 19 deletions(-)
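
The patch follows the standard DRM pattern of wrapping struct drm_plane_state
in a driver-private state, so per-update data (here, the fence) lives in each
plane state instead of in the shared framebuffer. A condensed sketch of that
pattern, taken from the diff below (needs <linux/slab.h> and
<drm/drm_atomic_helper.h>; the prepare_fb/cleanup_fb plumbing is omitted):

struct virtio_gpu_plane_state {
	struct drm_plane_state base;
	struct virtio_gpu_fence *fence;	/* one fence per plane update, not per FB */
};

#define to_virtio_gpu_plane_state(x) \
	container_of(x, struct virtio_gpu_plane_state, base)

/* .atomic_duplicate_state hook: every duplicated state starts with fence == NULL */
static struct drm_plane_state *
virtio_gpu_plane_duplicate_state(struct drm_plane *plane)
{
	struct virtio_gpu_plane_state *new;

	if (WARN_ON(!plane->state))
		return NULL;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	/* copies only the base state; the fence is allocated later in prepare_fb */
	__drm_atomic_helper_plane_duplicate_state(plane, &new->base);

	return &new->base;
}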

Comments

Vivek Kasireddy Oct. 22, 2024, 4:44 a.m. UTC | #1
Hi Dmitry,

> Subject: [PATCH v3 2/2] drm/virtio: New fence for every plane update
> 
> From: Dongwon Kim <dongwon.kim@intel.com>
> 
> Having the fence linked to the virtio_gpu_framebuffer in the plane update
> sequence causes a conflict when several planes reference the same
> framebuffer (e.g. an Xorg screen covering multiple displays configured in
> extended mode) and those planes are updated concurrently. Allocate a fence
> for every plane state instead of for the framebuffer.
> 
> Signed-off-by: Dongwon Kim <dongwon.kim@intel.com>
> [dmitry.osipenko@collabora.com: rebase, fix up, edit commit message]
> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
> ---
>  drivers/gpu/drm/virtio/virtgpu_drv.h   |  7 ++++
>  drivers/gpu/drm/virtio/virtgpu_plane.c | 58 +++++++++++++++++---------
>  2 files changed, 46 insertions(+), 19 deletions(-)
> 
> diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
> index 64c236169db8..5dc8eeaf7123 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_drv.h
> +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
> @@ -194,6 +194,13 @@ struct virtio_gpu_framebuffer {
>  #define to_virtio_gpu_framebuffer(x) \
>  	container_of(x, struct virtio_gpu_framebuffer, base)
> 
> +struct virtio_gpu_plane_state {
> +	struct drm_plane_state base;
> +	struct virtio_gpu_fence *fence;
> +};
> +#define to_virtio_gpu_plane_state(x) \
> +	container_of(x, struct virtio_gpu_plane_state, base)
> +
>  struct virtio_gpu_queue {
>  	struct virtqueue *vq;
>  	spinlock_t qlock;
> diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
> index ab7232921cb7..2add67c6b66d 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_plane.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
> @@ -67,11 +67,28 @@ uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
>  	return format;
>  }
> 
> +static struct
> +drm_plane_state *virtio_gpu_plane_duplicate_state(struct drm_plane *plane)
> +{
> +	struct virtio_gpu_plane_state *new;
> +
> +	if (WARN_ON(!plane->state))
> +		return NULL;
> +
> +	new = kzalloc(sizeof(*new), GFP_KERNEL);
> +	if (!new)
> +		return NULL;
> +
> +	__drm_atomic_helper_plane_duplicate_state(plane, &new->base);
> +
> +	return &new->base;
> +}
> +
>  static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
>  	.update_plane		= drm_atomic_helper_update_plane,
>  	.disable_plane		= drm_atomic_helper_disable_plane,
>  	.reset			= drm_atomic_helper_plane_reset,
> -	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
> +	.atomic_duplicate_state = virtio_gpu_plane_duplicate_state,
>  	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
>  };
> 
> @@ -139,11 +156,13 @@ static void virtio_gpu_resource_flush(struct drm_plane *plane,
>  	struct drm_device *dev = plane->dev;
>  	struct virtio_gpu_device *vgdev = dev->dev_private;
>  	struct virtio_gpu_framebuffer *vgfb;
> +	struct virtio_gpu_plane_state *vgplane_st;
>  	struct virtio_gpu_object *bo;
> 
>  	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
> +	vgplane_st = to_virtio_gpu_plane_state(plane->state);
>  	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
> -	if (vgfb->fence) {
> +	if (vgplane_st->fence) {
>  		struct virtio_gpu_object_array *objs;
> 
>  		objs = virtio_gpu_array_alloc(1);
> @@ -152,13 +171,11 @@ static void virtio_gpu_resource_flush(struct drm_plane *plane,
>  		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
>  		virtio_gpu_array_lock_resv(objs);
>  		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
> -					      width, height, objs, vgfb->fence);
> +					      width, height, objs,
> +					      vgplane_st->fence);
>  		virtio_gpu_notify(vgdev);
> -
> -		dma_fence_wait_timeout(&vgfb->fence->f, true,
> +		dma_fence_wait_timeout(&vgplane_st->fence->f, true,
>  				       msecs_to_jiffies(50));
> -		dma_fence_put(&vgfb->fence->f);
> -		vgfb->fence = NULL;
Not sure if it makes any difference but would there be a problem if you unref
the fence here (existing behavior) instead of deferring it until cleanup?

>  	} else {
>  		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
>  					      width, height, NULL, NULL);
> @@ -248,12 +265,14 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
>  	struct drm_device *dev = plane->dev;
>  	struct virtio_gpu_device *vgdev = dev->dev_private;
>  	struct virtio_gpu_framebuffer *vgfb;
> +	struct virtio_gpu_plane_state *vgplane_st;
>  	struct virtio_gpu_object *bo;
> 
>  	if (!new_state->fb)
>  		return 0;
> 
>  	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
> +	vgplane_st = to_virtio_gpu_plane_state(new_state);
>  	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
> 
>  	drm_gem_plane_helper_prepare_fb(plane, new_state);
> @@ -261,10 +280,11 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
>  	if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
>  		return 0;
> 
> -	if (bo->dumb && (plane->state->fb != new_state->fb)) {
> -		vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
> +	if (bo->dumb) {
> +		vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
> +						     vgdev->fence_drv.context,
>  						     0);
> -		if (!vgfb->fence)
> +		if (!vgplane_st->fence)
>  			return -ENOMEM;
>  	}
> 
> @@ -274,15 +294,15 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
>  static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
>  					struct drm_plane_state *state)
>  {
> -	struct virtio_gpu_framebuffer *vgfb;
> +	struct virtio_gpu_plane_state *vgplane_st;
> 
>  	if (!state->fb)
>  		return;
> 
> -	vgfb = to_virtio_gpu_framebuffer(state->fb);
> -	if (vgfb->fence) {
> -		dma_fence_put(&vgfb->fence->f);
> -		vgfb->fence = NULL;
> +	vgplane_st = to_virtio_gpu_plane_state(state);
> +	if (vgplane_st->fence) {
> +		dma_fence_put(&vgplane_st->fence->f);
> +		vgplane_st->fence = NULL;
>  	}
>  }
> 
> @@ -295,6 +315,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
>  	struct virtio_gpu_device *vgdev = dev->dev_private;
>  	struct virtio_gpu_output *output = NULL;
>  	struct virtio_gpu_framebuffer *vgfb;
> +	struct virtio_gpu_plane_state *vgplane_st;
>  	struct virtio_gpu_object *bo = NULL;
>  	uint32_t handle;
> 
> @@ -307,6 +328,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
> 
>  	if (plane->state->fb) {
>  		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
> +		vgplane_st = to_virtio_gpu_plane_state(plane->state);
>  		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
>  		handle = bo->hw_res_handle;
>  	} else {
> @@ -326,11 +348,9 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
>  			(vgdev, 0,
>  			 plane->state->crtc_w,
>  			 plane->state->crtc_h,
> -			 0, 0, objs, vgfb->fence);
> +			 0, 0, objs, vgplane_st->fence);
>  		virtio_gpu_notify(vgdev);
> -		dma_fence_wait(&vgfb->fence->f, true);
> -		dma_fence_put(&vgfb->fence->f);
> -		vgfb->fence = NULL;
Same comment as above.
Regardless, the patch LGTM.

Acked-by: Vivek Kasireddy <vivek.kasireddy@intel.com>

> +		dma_fence_wait(&vgplane_st->fence->f, true);
>  	}
> 
>  	if (plane->state->fb != old_state->fb) {
> --
> 2.47.0
Dmitry Osipenko Oct. 25, 2024, 5:59 p.m. UTC | #2
On 10/22/24 07:44, Kasireddy, Vivek wrote:
>>  		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
>> -					      width, height, objs, vgfb->fence);
>> +					      width, height, objs,
>> +					      vgplane_st->fence);
>>  		virtio_gpu_notify(vgdev);
>> -
>> -		dma_fence_wait_timeout(&vgfb->fence->f, true,
>> +		dma_fence_wait_timeout(&vgplane_st->fence->f, true,
>>  				       msecs_to_jiffies(50));
>> -		dma_fence_put(&vgfb->fence->f);
>> -		vgfb->fence = NULL;
> Not sure if it makes any difference but would there be a problem if you unref
> the fence here (existing behavior) instead of deferring it until cleanup?

Previously the fence was part of the virtio-gpu framebuffer, which was
kind of a hack. Maybe there was no better option back when this code was
originally written.

Now the fence is part of the plane's atomic state, as it should be. We
shouldn't change the atomic state at commit time.

...
>> @@ -326,11 +348,9 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
>>  			(vgdev, 0,
>>  			 plane->state->crtc_w,
>>  			 plane->state->crtc_h,
>> -			 0, 0, objs, vgfb->fence);
>> +			 0, 0, objs, vgplane_st->fence);
>>  		virtio_gpu_notify(vgdev);
>> -		dma_fence_wait(&vgfb->fence->f, true);
>> -		dma_fence_put(&vgfb->fence->f);
>> -		vgfb->fence = NULL;
> Same comment as above.
> Regardless, the patch LGTM.
> 
> Acked-by: Vivek Kasireddy <vivek.kasireddy@intel.com>

Thanks for the review :)
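
On the fence-lifetime question: with the fence stored in the plane state, the
reference taken in prepare_fb is dropped in cleanup_fb rather than right after
the wait. Under the usual drm_atomic_helper contract, cleanup_fb is called for
every plane state whose prepare_fb succeeded (for the old state once the commit
is done, or on error unwind), so the reference cannot leak and the commit path
no longer mutates the atomic state. The relevant hunk from the patch below, for
reference:

/* called by the atomic helpers once this plane state is torn down */
static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
					struct drm_plane_state *state)
{
	struct virtio_gpu_plane_state *vgplane_st;

	if (!state->fb)
		return;

	vgplane_st = to_virtio_gpu_plane_state(state);
	if (vgplane_st->fence) {
		/* drop the reference taken in virtio_gpu_plane_prepare_fb() */
		dma_fence_put(&vgplane_st->fence->f);
		vgplane_st->fence = NULL;
	}
}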

Patch

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 64c236169db8..5dc8eeaf7123 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -194,6 +194,13 @@  struct virtio_gpu_framebuffer {
 #define to_virtio_gpu_framebuffer(x) \
 	container_of(x, struct virtio_gpu_framebuffer, base)
 
+struct virtio_gpu_plane_state {
+	struct drm_plane_state base;
+	struct virtio_gpu_fence *fence;
+};
+#define to_virtio_gpu_plane_state(x) \
+	container_of(x, struct virtio_gpu_plane_state, base)
+
 struct virtio_gpu_queue {
 	struct virtqueue *vq;
 	spinlock_t qlock;
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index ab7232921cb7..2add67c6b66d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -67,11 +67,28 @@  uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
 	return format;
 }
 
+static struct
+drm_plane_state *virtio_gpu_plane_duplicate_state(struct drm_plane *plane)
+{
+	struct virtio_gpu_plane_state *new;
+
+	if (WARN_ON(!plane->state))
+		return NULL;
+
+	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	if (!new)
+		return NULL;
+
+	__drm_atomic_helper_plane_duplicate_state(plane, &new->base);
+
+	return &new->base;
+}
+
 static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
 	.update_plane		= drm_atomic_helper_update_plane,
 	.disable_plane		= drm_atomic_helper_disable_plane,
 	.reset			= drm_atomic_helper_plane_reset,
-	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_duplicate_state = virtio_gpu_plane_duplicate_state,
 	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
 };
 
@@ -139,11 +156,13 @@  static void virtio_gpu_resource_flush(struct drm_plane *plane,
 	struct drm_device *dev = plane->dev;
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_framebuffer *vgfb;
+	struct virtio_gpu_plane_state *vgplane_st;
 	struct virtio_gpu_object *bo;
 
 	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+	vgplane_st = to_virtio_gpu_plane_state(plane->state);
 	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
-	if (vgfb->fence) {
+	if (vgplane_st->fence) {
 		struct virtio_gpu_object_array *objs;
 
 		objs = virtio_gpu_array_alloc(1);
@@ -152,13 +171,11 @@  static void virtio_gpu_resource_flush(struct drm_plane *plane,
 		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
 		virtio_gpu_array_lock_resv(objs);
 		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
-					      width, height, objs, vgfb->fence);
+					      width, height, objs,
+					      vgplane_st->fence);
 		virtio_gpu_notify(vgdev);
-
-		dma_fence_wait_timeout(&vgfb->fence->f, true,
+		dma_fence_wait_timeout(&vgplane_st->fence->f, true,
 				       msecs_to_jiffies(50));
-		dma_fence_put(&vgfb->fence->f);
-		vgfb->fence = NULL;
 	} else {
 		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
 					      width, height, NULL, NULL);
@@ -248,12 +265,14 @@  static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
 	struct drm_device *dev = plane->dev;
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_framebuffer *vgfb;
+	struct virtio_gpu_plane_state *vgplane_st;
 	struct virtio_gpu_object *bo;
 
 	if (!new_state->fb)
 		return 0;
 
 	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
+	vgplane_st = to_virtio_gpu_plane_state(new_state);
 	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
 
 	drm_gem_plane_helper_prepare_fb(plane, new_state);
@@ -261,10 +280,11 @@  static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
 	if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
 		return 0;
 
-	if (bo->dumb && (plane->state->fb != new_state->fb)) {
-		vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
+	if (bo->dumb) {
+		vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
+						     vgdev->fence_drv.context,
 						     0);
-		if (!vgfb->fence)
+		if (!vgplane_st->fence)
 			return -ENOMEM;
 	}
 
@@ -274,15 +294,15 @@  static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
 static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
 					struct drm_plane_state *state)
 {
-	struct virtio_gpu_framebuffer *vgfb;
+	struct virtio_gpu_plane_state *vgplane_st;
 
 	if (!state->fb)
 		return;
 
-	vgfb = to_virtio_gpu_framebuffer(state->fb);
-	if (vgfb->fence) {
-		dma_fence_put(&vgfb->fence->f);
-		vgfb->fence = NULL;
+	vgplane_st = to_virtio_gpu_plane_state(state);
+	if (vgplane_st->fence) {
+		dma_fence_put(&vgplane_st->fence->f);
+		vgplane_st->fence = NULL;
 	}
 }
 
@@ -295,6 +315,7 @@  static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_output *output = NULL;
 	struct virtio_gpu_framebuffer *vgfb;
+	struct virtio_gpu_plane_state *vgplane_st;
 	struct virtio_gpu_object *bo = NULL;
 	uint32_t handle;
 
@@ -307,6 +328,7 @@  static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 
 	if (plane->state->fb) {
 		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+		vgplane_st = to_virtio_gpu_plane_state(plane->state);
 		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
 		handle = bo->hw_res_handle;
 	} else {
@@ -326,11 +348,9 @@  static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 			(vgdev, 0,
 			 plane->state->crtc_w,
 			 plane->state->crtc_h,
-			 0, 0, objs, vgfb->fence);
+			 0, 0, objs, vgplane_st->fence);
 		virtio_gpu_notify(vgdev);
-		dma_fence_wait(&vgfb->fence->f, true);
-		dma_fence_put(&vgfb->fence->f);
-		vgfb->fence = NULL;
+		dma_fence_wait(&vgplane_st->fence->f, true);
 	}
 
 	if (plane->state->fb != old_state->fb) {