diff mbox series

[09/10] drm/qxl: map/unmap framebuffers in prepare_fb+cleanup_fb callbacks.

Message ID 20210216113716.716996-10-kraxel@redhat.com (mailing list archive)
State New, archived
Headers show
Series drm/qxl: a collection of fixes | expand

Commit Message

Gerd Hoffmann Feb. 16, 2021, 11:37 a.m. UTC
We don't have to map in atomic_update callback then,
making locking a bit less complicated.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
 drivers/gpu/drm/qxl/qxl_display.c | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

Comments

Thomas Zimmermann Feb. 16, 2021, 1:27 p.m. UTC | #1
Hi

this is a shadow-buffered plane. Did you consider using the new helpers 
for shadow-buffered planes? They will map the user BO for you and 
provide the mapping in the plane state.

From there, you should implement your own plane state on top of struct 
drm_shadow_plane_state, and also move all the other allocations and 
vmaps into prepare_fb and cleanup_fb. Most of this is not actually 
allowed in commit tails. All we'd have to do is to export the reset, 
duplicate and destroy code; similar to what 
__drm_atomic_helper_plane_reset() does.

Best regards
Thomas


Am 16.02.21 um 12:37 schrieb Gerd Hoffmann:
> We don't have to map in atomic_update callback then,
> making locking a bit less complicated.
> 
> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
> ---
>   drivers/gpu/drm/qxl/qxl_display.c | 14 +++++---------
>   1 file changed, 5 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
> index 7500560db8e4..39b8c5116d34 100644
> --- a/drivers/gpu/drm/qxl/qxl_display.c
> +++ b/drivers/gpu/drm/qxl/qxl_display.c
> @@ -584,7 +584,6 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
>   	struct drm_gem_object *obj;
>   	struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL;
>   	int ret;
> -	struct dma_buf_map user_map;
>   	struct dma_buf_map cursor_map;
>   	void *user_ptr;
>   	int size = 64*64*4;
> @@ -599,11 +598,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
>   		obj = fb->obj[0];
>   		user_bo = gem_to_qxl_bo(obj);
>   
> -		/* pinning is done in the prepare/cleanup framevbuffer */
> -		ret = qxl_bo_kmap_locked(user_bo, &user_map);
> -		if (ret)
> -			goto out_free_release;
> -		user_ptr = user_map.vaddr; /* TODO: Use mapping abstraction properly */
> +		/* mapping is done in the prepare/cleanup framevbuffer */
> +		user_ptr = user_bo->map.vaddr; /* TODO: Use mapping abstraction properly */
>   
>   		ret = qxl_alloc_bo_reserved(qdev, release,
>   					    sizeof(struct qxl_cursor) + size,
> @@ -639,7 +635,6 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
>   		cursor->chunk.data_size = size;
>   		memcpy(cursor->chunk.data, user_ptr, size);
>   		qxl_bo_kunmap_locked(cursor_bo);
> -		qxl_bo_kunmap_locked(user_bo);
>   
>   		cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
>   		cmd->u.set.visible = 1;
> @@ -778,6 +773,7 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
>   	struct drm_gem_object *obj;
>   	struct qxl_bo *user_bo;
>   	struct qxl_surface surf;
> +	struct dma_buf_map unused;
>   
>   	if (!new_state->fb)
>   		return 0;
> @@ -815,7 +811,7 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
>   		}
>   	}
>   
> -	return qxl_bo_pin(user_bo);
> +	return qxl_bo_kmap(user_bo, &unused);
>   }
>   
>   static void qxl_plane_cleanup_fb(struct drm_plane *plane,
> @@ -834,7 +830,7 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane,
>   
>   	obj = old_state->fb->obj[0];
>   	user_bo = gem_to_qxl_bo(obj);
> -	qxl_bo_unpin(user_bo);
> +	qxl_bo_kunmap(user_bo);
>   
>   	if (old_state->fb != plane->state->fb && user_bo->shadow) {
>   		qxl_bo_unpin(user_bo->shadow);
>
Thomas Zimmermann Feb. 16, 2021, 1:46 p.m. UTC | #2
Am 16.02.21 um 14:27 schrieb Thomas Zimmermann:
> Hi
> 
> this is a shadow-buffered plane. Did you consider using the new helpers 
> for shadow-buffered planes? They will map the user BO for you and 
> provide the mapping in the plane state.
> 
>  From there, you should implement your own plane state on top of struct 
> drm_shadow_plane_state, and also move all the other allocations and 
> vmaps into prepare_fb and cleanup_fb. Most of this is not actually 
> allowed in commit tails. All we'd have to do is to export the reset, 
> duplicate and destroy code; similar to what 
> __drm_atomic_helper_plane_reset() does.

AFAICT the cursor_bo is used to implement double buffering for the 
cursor image.

Ideally, you can do what ast does: pre-allocate/vmap 2 BOs at the end of 
the vram. Then pageflip between them in atomic_update(). Resolves all 
the allocation and mapping headaches.

Best regards
Thomas

> 
> Best regards
> Thomas
> 
> 
> Am 16.02.21 um 12:37 schrieb Gerd Hoffmann:
>> We don't have to map in atomic_update callback then,
>> making locking a bit less complicated.
>>
>> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
>> ---
>>   drivers/gpu/drm/qxl/qxl_display.c | 14 +++++---------
>>   1 file changed, 5 insertions(+), 9 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/qxl/qxl_display.c 
>> b/drivers/gpu/drm/qxl/qxl_display.c
>> index 7500560db8e4..39b8c5116d34 100644
>> --- a/drivers/gpu/drm/qxl/qxl_display.c
>> +++ b/drivers/gpu/drm/qxl/qxl_display.c
>> @@ -584,7 +584,6 @@ static void qxl_cursor_atomic_update(struct 
>> drm_plane *plane,
>>       struct drm_gem_object *obj;
>>       struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo 
>> = NULL;
>>       int ret;
>> -    struct dma_buf_map user_map;
>>       struct dma_buf_map cursor_map;
>>       void *user_ptr;
>>       int size = 64*64*4;
>> @@ -599,11 +598,8 @@ static void qxl_cursor_atomic_update(struct 
>> drm_plane *plane,
>>           obj = fb->obj[0];
>>           user_bo = gem_to_qxl_bo(obj);
>> -        /* pinning is done in the prepare/cleanup framevbuffer */
>> -        ret = qxl_bo_kmap_locked(user_bo, &user_map);
>> -        if (ret)
>> -            goto out_free_release;
>> -        user_ptr = user_map.vaddr; /* TODO: Use mapping abstraction 
>> properly */
>> +        /* mapping is done in the prepare/cleanup framevbuffer */
>> +        user_ptr = user_bo->map.vaddr; /* TODO: Use mapping 
>> abstraction properly */
>>           ret = qxl_alloc_bo_reserved(qdev, release,
>>                           sizeof(struct qxl_cursor) + size,
>> @@ -639,7 +635,6 @@ static void qxl_cursor_atomic_update(struct 
>> drm_plane *plane,
>>           cursor->chunk.data_size = size;
>>           memcpy(cursor->chunk.data, user_ptr, size);
>>           qxl_bo_kunmap_locked(cursor_bo);
>> -        qxl_bo_kunmap_locked(user_bo);
>>           cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
>>           cmd->u.set.visible = 1;
>> @@ -778,6 +773,7 @@ static int qxl_plane_prepare_fb(struct drm_plane 
>> *plane,
>>       struct drm_gem_object *obj;
>>       struct qxl_bo *user_bo;
>>       struct qxl_surface surf;
>> +    struct dma_buf_map unused;
>>       if (!new_state->fb)
>>           return 0;
>> @@ -815,7 +811,7 @@ static int qxl_plane_prepare_fb(struct drm_plane 
>> *plane,
>>           }
>>       }
>> -    return qxl_bo_pin(user_bo);
>> +    return qxl_bo_kmap(user_bo, &unused);
>>   }
>>   static void qxl_plane_cleanup_fb(struct drm_plane *plane,
>> @@ -834,7 +830,7 @@ static void qxl_plane_cleanup_fb(struct drm_plane 
>> *plane,
>>       obj = old_state->fb->obj[0];
>>       user_bo = gem_to_qxl_bo(obj);
>> -    qxl_bo_unpin(user_bo);
>> +    qxl_bo_kunmap(user_bo);
>>       if (old_state->fb != plane->state->fb && user_bo->shadow) {
>>           qxl_bo_unpin(user_bo->shadow);
>>
> 
> 
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
>
Gerd Hoffmann Feb. 17, 2021, 10:02 a.m. UTC | #3
On Tue, Feb 16, 2021 at 02:46:21PM +0100, Thomas Zimmermann wrote:
> 
> 
> Am 16.02.21 um 14:27 schrieb Thomas Zimmermann:
> > Hi
> > 
> > this is a shadow-buffered plane. Did you consider using the new helpers
> > for shadow-buffered planes? They will map the user BO for you and
> > provide the mapping in the plane state.
> > 
> >  From there, you should implement your own plane state on top of struct
> > drm_shadow_plane_state, and also move all the other allocations and
> > vmaps into prepare_fb and cleanup_fb. Most of this is not actually
> > allowed in commit tails. All we'd have to do is to export the reset,
> > duplicate and destroy code; similar to what
> > __drm_atomic_helper_plane_reset() does.
> 
> AFAICT the cursor_bo is used to implement double buffering for the cursor
> image.
> 
> Ideally, you can do what ast does: pre-allocate/vmap 2 BOs at the end of the
> vram. Then pageflip between them in atomic_update(). Resolves all the
> allocation and mapping headaches.

Just waded through the ast patches.

It is not that simple for qxl.  You have to send a command to the
virtualization host and take care of the host accessing that memory
when processing the command, so you can't reuse the memory until the
host signals it is fine to do so.

But, yes, it should be possible to handle cursor_bo creation in
prepare_fb without too much effort.

take care,
  Gerd
Thomas Zimmermann Feb. 17, 2021, 10:23 a.m. UTC | #4
Hi

Am 17.02.21 um 11:02 schrieb Gerd Hoffmann:
> On Tue, Feb 16, 2021 at 02:46:21PM +0100, Thomas Zimmermann wrote:
>>
>>
>> Am 16.02.21 um 14:27 schrieb Thomas Zimmermann:
>>> Hi
>>>
>>> this is a shadow-buffered plane. Did you consider using the new helpers
>>> for shadow-buffered planes? They will map the user BO for you and
>>> provide the mapping in the plane state.
>>>
>>>   From there, you should implement your own plane state on top of struct
>>> drm_shadow_plane_state, and also move all the other allocations and
>>> vmaps into prepare_fb and cleanup_fb. Most of this is not actually
>>> allowed in commit tails. All we'd have to do is to export the reset,
>>> duplicate and destroy code; similar to what
>>> __drm_atomic_helper_plane_reset() does.
>>
>> AFAICT the cursor_bo is used to implement double buffering for the cursor
>> image.
>>
>> Ideally, you can do what ast does: pre-allocate/vmap 2 BOs at the end of the
>> vram. Then pageflip between them in atomic_update(). Resolves all the
>> allocation and mapping headaches.
> 
> Just waded through the ast patches.

I just received your ack. Thanks a lot for looking at the ast patches.

> 
> It is not that simple for qxl.  You have to send a command to the
> virtualization host and take care of the host accessing that memory
> when processing the command, so you can't reuse the memory until the
> host signals it is fine to do so.
> 
> But, yes, it should be possible to handle cursor_bo creation in
> prepare_fb without too much effort.

I've been thinking about this issue and here's an idea:

If you take the ast code as a blueprint, you'd store two cursor BOs in a 
cursor-plane structure. Additionally, each of these BOs would have a 
pointer to a fence associated with it.

One idea for the fencing code would be to allocate each new fence in 
prepare_fb and store it in the cursor plane state. In atomic_update, 
pick the unused BO in the cursor plane and wait on its fence. This 
should guarantee that the BO is available. (?) Then swap the BO's fence 
with the one in the cursor plane state. Setup the new fence for 
synchronization with the host. Next time you pick this cursor BO, the 
fence will be there for synchronization. The old fence from the cursor 
BO will now be stored in the cursor-plane state and can be freed in 
cleanup_fb().

My main interest here is to move all fallible/locking calls out of the 
atomic_update function. I might be missing some crucial corner case, but 
this should resolve the issue. (?) In any case, it's maybe worth a 
separate patchset.

Best regards
Thomas

> 
> take care,
>    Gerd
> 
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
>
diff mbox series

Patch

diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 7500560db8e4..39b8c5116d34 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -584,7 +584,6 @@  static void qxl_cursor_atomic_update(struct drm_plane *plane,
 	struct drm_gem_object *obj;
 	struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL;
 	int ret;
-	struct dma_buf_map user_map;
 	struct dma_buf_map cursor_map;
 	void *user_ptr;
 	int size = 64*64*4;
@@ -599,11 +598,8 @@  static void qxl_cursor_atomic_update(struct drm_plane *plane,
 		obj = fb->obj[0];
 		user_bo = gem_to_qxl_bo(obj);
 
-		/* pinning is done in the prepare/cleanup framevbuffer */
-		ret = qxl_bo_kmap_locked(user_bo, &user_map);
-		if (ret)
-			goto out_free_release;
-		user_ptr = user_map.vaddr; /* TODO: Use mapping abstraction properly */
+		/* mapping is done in the prepare/cleanup framevbuffer */
+		user_ptr = user_bo->map.vaddr; /* TODO: Use mapping abstraction properly */
 
 		ret = qxl_alloc_bo_reserved(qdev, release,
 					    sizeof(struct qxl_cursor) + size,
@@ -639,7 +635,6 @@  static void qxl_cursor_atomic_update(struct drm_plane *plane,
 		cursor->chunk.data_size = size;
 		memcpy(cursor->chunk.data, user_ptr, size);
 		qxl_bo_kunmap_locked(cursor_bo);
-		qxl_bo_kunmap_locked(user_bo);
 
 		cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
 		cmd->u.set.visible = 1;
@@ -778,6 +773,7 @@  static int qxl_plane_prepare_fb(struct drm_plane *plane,
 	struct drm_gem_object *obj;
 	struct qxl_bo *user_bo;
 	struct qxl_surface surf;
+	struct dma_buf_map unused;
 
 	if (!new_state->fb)
 		return 0;
@@ -815,7 +811,7 @@  static int qxl_plane_prepare_fb(struct drm_plane *plane,
 		}
 	}
 
-	return qxl_bo_pin(user_bo);
+	return qxl_bo_kmap(user_bo, &unused);
 }
 
 static void qxl_plane_cleanup_fb(struct drm_plane *plane,
@@ -834,7 +830,7 @@  static void qxl_plane_cleanup_fb(struct drm_plane *plane,
 
 	obj = old_state->fb->obj[0];
 	user_bo = gem_to_qxl_bo(obj);
-	qxl_bo_unpin(user_bo);
+	qxl_bo_kunmap(user_bo);
 
 	if (old_state->fb != plane->state->fb && user_bo->shadow) {
 		qxl_bo_unpin(user_bo->shadow);