
[2/7] drm/qxl: switch to using drm_exec v2

Message ID 20241114153020.6209-3-christian.koenig@amd.com (mailing list archive)
State New, archived
Series [1/7] drm/radeon: switch over to drm_exec v2

Commit Message

Christian König Nov. 14, 2024, 3:30 p.m. UTC
Just a straightforward conversion without any optimization.

Only compile tested for now.

v2: rebase

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/qxl/Kconfig       |  1 +
 drivers/gpu/drm/qxl/qxl_drv.h     |  7 ++--
 drivers/gpu/drm/qxl/qxl_release.c | 68 ++++++++++++++++---------------
 3 files changed, 40 insertions(+), 36 deletions(-)
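
For readers unfamiliar with drm_exec, the locking pattern the conversion
adopts in qxl_release_reserve_list() looks roughly like the sketch below.
The demo_* names and the demo_bo_entry type are illustrative only
(demo_bo_entry mirrors the new struct qxl_bo_list); the drm_exec calls
themselves are the ones used by the patch.

#include <linux/list.h>
#include <drm/drm_exec.h>
#include <drm/drm_gem.h>

/* hypothetical bookkeeping entry, mirroring the new struct qxl_bo_list */
struct demo_bo_entry {
        struct drm_gem_object *obj;
        struct list_head list;
};

static int demo_reserve_list(struct list_head *bos, struct drm_exec *exec,
                             bool no_intr)
{
        struct demo_bo_entry *entry;
        int ret;

        drm_exec_init(exec, no_intr ? 0 : DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
        drm_exec_until_all_locked(exec) {
                list_for_each_entry(entry, bos, list) {
                        /* lock the object and reserve one fence slot */
                        ret = drm_exec_prepare_obj(exec, entry->obj, 1);
                        /* on ww-mutex contention, drop all locks and restart */
                        drm_exec_retry_on_contention(exec);
                        if (ret)
                                goto error;
                }
        }
        return 0;

error:
        drm_exec_fini(exec); /* releases every lock taken so far */
        return ret;
}

Compared to ttm_eu_reserve_buffers()/ttm_eu_backoff_reservation(), the
ww_acquire_ctx ticket and the ttm_validate_buffer bookkeeping now live
inside struct drm_exec, which is why the patch can drop them from
struct qxl_release and struct qxl_bo_list.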

Comments

Alex Deucher Nov. 14, 2024, 8:53 p.m. UTC | #1
On Thu, Nov 14, 2024 at 10:44 AM Christian König
<ckoenig.leichtzumerken@gmail.com> wrote:
>
> Just a straightforward conversion without any optimization.
>
> Only compile tested for now.
>
> v2: rebase
>
> Signed-off-by: Christian König <christian.koenig@amd.com>

Acked-by: Alex Deucher <alexander.deucher@amd.com>


Patch

diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index 1992df4a82d2..ebf452aa1e80 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -6,6 +6,7 @@ config DRM_QXL
 	select DRM_KMS_HELPER
 	select DRM_TTM
 	select DRM_TTM_HELPER
+	select DRM_EXEC
 	select CRC32
 	help
 	  QXL virtual GPU for Spice virtualization desktop integration.
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 32069acd93f8..b5fc14c9525d 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -38,12 +38,12 @@ 
 
 #include <drm/drm_crtc.h>
 #include <drm/drm_encoder.h>
+#include <drm/drm_exec.h>
 #include <drm/drm_gem_ttm_helper.h>
 #include <drm/drm_ioctl.h>
 #include <drm/drm_gem.h>
 #include <drm/qxl_drm.h>
 #include <drm/ttm/ttm_bo.h>
-#include <drm/ttm/ttm_execbuf_util.h>
 #include <drm/ttm/ttm_placement.h>
 
 #include "qxl_dev.h"
@@ -101,7 +101,8 @@ struct qxl_gem {
 };
 
 struct qxl_bo_list {
-	struct ttm_validate_buffer tv;
+	struct qxl_bo		*bo;
+	struct list_head	list;
 };
 
 struct qxl_crtc {
@@ -150,7 +151,7 @@ struct qxl_release {
 	struct qxl_bo *release_bo;
 	uint32_t release_offset;
 	uint32_t surface_release_id;
-	struct ww_acquire_ctx ticket;
+	struct drm_exec	exec;
 	struct list_head bos;
 };
 
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 368d26da0d6a..05204a6a3fa8 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -121,13 +121,11 @@ qxl_release_free_list(struct qxl_release *release)
 {
 	while (!list_empty(&release->bos)) {
 		struct qxl_bo_list *entry;
-		struct qxl_bo *bo;
 
 		entry = container_of(release->bos.next,
-				     struct qxl_bo_list, tv.head);
-		bo = to_qxl_bo(entry->tv.bo);
-		qxl_bo_unref(&bo);
-		list_del(&entry->tv.head);
+				     struct qxl_bo_list, list);
+		qxl_bo_unref(&entry->bo);
+		list_del(&entry->list);
 		kfree(entry);
 	}
 	release->release_bo = NULL;
@@ -172,8 +170,8 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
 {
 	struct qxl_bo_list *entry;
 
-	list_for_each_entry(entry, &release->bos, tv.head) {
-		if (entry->tv.bo == &bo->tbo)
+	list_for_each_entry(entry, &release->bos, list) {
+		if (entry->bo == bo)
 			return 0;
 	}
 
@@ -182,9 +180,8 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
 		return -ENOMEM;
 
 	qxl_bo_ref(bo);
-	entry->tv.bo = &bo->tbo;
-	entry->tv.num_shared = 0;
-	list_add_tail(&entry->tv.head, &release->bos);
+	entry->bo = bo;
+	list_add_tail(&entry->list, &release->bos);
 	return 0;
 }
 
@@ -221,21 +218,28 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
 	if (list_is_singular(&release->bos))
 		return 0;
 
-	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
-				     !no_intr, NULL);
-	if (ret)
-		return ret;
-
-	list_for_each_entry(entry, &release->bos, tv.head) {
-		struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
-
-		ret = qxl_release_validate_bo(bo);
-		if (ret) {
-			ttm_eu_backoff_reservation(&release->ticket, &release->bos);
-			return ret;
+	drm_exec_init(&release->exec, no_intr ? 0 :
+		      DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+	drm_exec_until_all_locked(&release->exec) {
+		list_for_each_entry(entry, &release->bos, list) {
+			ret = drm_exec_prepare_obj(&release->exec,
+						   &entry->bo->tbo.base,
+						   1);
+			drm_exec_retry_on_contention(&release->exec);
+			if (ret)
+				goto error;
 		}
 	}
+
+	list_for_each_entry(entry, &release->bos, list) {
+		ret = qxl_release_validate_bo(entry->bo);
+		if (ret)
+			goto error;
+	}
 	return 0;
+error:
+	drm_exec_fini(&release->exec);
+	return ret;
 }
 
 void qxl_release_backoff_reserve_list(struct qxl_release *release)
@@ -245,7 +249,7 @@ void qxl_release_backoff_reserve_list(struct qxl_release *release)
 	if (list_is_singular(&release->bos))
 		return;
 
-	ttm_eu_backoff_reservation(&release->ticket, &release->bos);
+	drm_exec_fini(&release->exec);
 }
 
 int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
@@ -404,18 +408,18 @@ void qxl_release_unmap(struct qxl_device *qdev,
 
 void qxl_release_fence_buffer_objects(struct qxl_release *release)
 {
-	struct ttm_buffer_object *bo;
 	struct ttm_device *bdev;
-	struct ttm_validate_buffer *entry;
+	struct qxl_bo_list *entry;
 	struct qxl_device *qdev;
+	struct qxl_bo *bo;
 
 	/* if only one object on the release its the release itself
 	   since these objects are pinned no need to reserve */
 	if (list_is_singular(&release->bos) || list_empty(&release->bos))
 		return;
 
-	bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
-	bdev = bo->bdev;
+	bo = list_first_entry(&release->bos, struct qxl_bo_list, list)->bo;
+	bdev = bo->tbo.bdev;
 	qdev = container_of(bdev, struct qxl_device, mman.bdev);
 
 	/*
@@ -426,14 +430,12 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
 		       release->id | 0xf0000000, release->base.seqno);
 	trace_dma_fence_emit(&release->base);
 
-	list_for_each_entry(entry, &release->bos, head) {
+	list_for_each_entry(entry, &release->bos, list) {
 		bo = entry->bo;
 
-		dma_resv_add_fence(bo->base.resv, &release->base,
+		dma_resv_add_fence(bo->tbo.base.resv, &release->base,
 				   DMA_RESV_USAGE_READ);
-		ttm_bo_move_to_lru_tail_unlocked(bo);
-		dma_resv_unlock(bo->base.resv);
+		ttm_bo_move_to_lru_tail_unlocked(&bo->tbo);
 	}
-	ww_acquire_fini(&release->ticket);
+	drm_exec_fini(&release->exec);
 }
-
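
The fencing side of the conversion follows the same lifetime: the buffers
stay locked by the drm_exec context from reservation until the release has
been fenced, and the single drm_exec_fini() at the end of
qxl_release_fence_buffer_objects() replaces the per-object dma_resv_unlock()
plus ww_acquire_fini(). A minimal sketch of that half, reusing the
illustrative demo_bo_entry type from the sketch above:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/* add the release fence to every reserved object, then drop all locks */
static void demo_fence_and_unlock(struct list_head *bos, struct drm_exec *exec,
                                  struct dma_fence *fence)
{
        struct demo_bo_entry *entry;

        list_for_each_entry(entry, bos, list)
                /* resv is still locked and a fence slot was reserved earlier */
                dma_resv_add_fence(entry->obj->resv, fence, DMA_RESV_USAGE_READ);

        drm_exec_fini(exec); /* unlocks everything in one call */
}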