[RFC,6/6] drm/xe: Use dma_fence_preempt base class

Message ID 20241109172942.482630-7-matthew.brost@intel.com (mailing list archive)
State New, archived
Series: Common preempt fences and semantics

Commit Message

Matthew Brost Nov. 9, 2024, 5:29 p.m. UTC
Use the dma_fence_preempt base class in Xe instead of open-coding the
preemption implementation.

Cc: Dave Airlie <airlied@redhat.com>
Cc: Simona Vetter <simona.vetter@ffwll.ch>
Cc: Christian Koenig <christian.koenig@amd.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/xe/xe_guc_submit.c          |  3 +
 drivers/gpu/drm/xe/xe_hw_engine_group.c     |  4 +-
 drivers/gpu/drm/xe/xe_preempt_fence.c       | 81 ++++++---------------
 drivers/gpu/drm/xe/xe_preempt_fence.h       |  2 +-
 drivers/gpu/drm/xe/xe_preempt_fence_types.h | 11 +--
 5 files changed, 32 insertions(+), 69 deletions(-)
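
For readers jumping into this patch without patches 1-5 of the series, the dma_fence_preempt interface used below can be reconstructed from its call sites here. The following is an inferred sketch, not the authoritative header introduced earlier in the series; the struct internals in particular are assumptions:

struct dma_fence_preempt {
	struct dma_fence base;	/* &pfence->base.base yields the raw fence */
	/* internals set up by dma_fence_preempt_init(): ops, resv, wq, ... */
};

struct dma_fence_preempt_ops {
	/* issue preemption without blocking (Xe: q->ops->suspend()) */
	int (*preempt)(struct dma_fence_preempt *fence);
	/* wait for the issued preemption to complete (Xe: q->ops->suspend_wait()) */
	int (*preempt_wait)(struct dma_fence_preempt *fence);
	/* post-signal cleanup (Xe: queue VM rebind worker, drop queue ref) */
	void (*preempt_finished)(struct dma_fence_preempt *fence);
};

void dma_fence_preempt_init(struct dma_fence_preempt *fence,
			    const struct dma_fence_preempt_ops *ops,
			    struct dma_resv *resv,
			    struct workqueue_struct *wq,
			    u64 context, u64 seqno);

bool dma_fence_is_preempt(const struct dma_fence *fence);

Note the error-semantics change that rides along: guc_exec_queue_suspend_wait() now returns -ECANCELED up front for reset/killed/banned/wedged queues, and the xe_hw_engine_group callers treat only -ETIME as fatal, so a canceled queue no longer aborts the group suspend path.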
Patch

diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 9e0f86f3778b..0411ec9ed705 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1603,6 +1603,9 @@  static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
 	struct xe_guc *guc = exec_queue_to_guc(q);
 	int ret;
 
+	if (exec_queue_reset(q) || exec_queue_killed_or_banned_or_wedged(q))
+		return -ECANCELED;
+
 	/*
 	 * Likely don't need to check exec_queue_killed() as we clear
 	 * suspend_pending upon kill but to be paranoid but races in which
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c
index 82750520a90a..8ed5410c3964 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_group.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c
@@ -163,7 +163,7 @@  int xe_hw_engine_group_add_exec_queue(struct xe_hw_engine_group *group, struct x
 	if (xe_vm_in_fault_mode(q->vm) && group->cur_mode == EXEC_MODE_DMA_FENCE) {
 		q->ops->suspend(q);
 		err = q->ops->suspend_wait(q);
-		if (err)
+		if (err == -ETIME)
 			goto err_suspend;
 
 		xe_hw_engine_group_resume_faulting_lr_jobs(group);
@@ -236,7 +236,7 @@  static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group
 			continue;
 
 		err = q->ops->suspend_wait(q);
-		if (err)
+		if (err == -ETIME)
 			goto err_suspend;
 	}
 
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
index 83fbeea5aa20..34418454d65c 100644
--- a/drivers/gpu/drm/xe/xe_preempt_fence.c
+++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
@@ -4,73 +4,40 @@ 
  */
 
 #include "xe_preempt_fence.h"
-
-#include <linux/slab.h>
-
 #include "xe_exec_queue.h"
 #include "xe_vm.h"
 
-static void preempt_fence_work_func(struct work_struct *w)
+static struct xe_exec_queue *to_exec_queue(struct dma_fence_preempt *fence)
 {
-	bool cookie = dma_fence_begin_signalling();
-	struct xe_preempt_fence *pfence =
-		container_of(w, typeof(*pfence), preempt_work);
-	struct xe_exec_queue *q = pfence->q;
-
-	if (pfence->error) {
-		dma_fence_set_error(&pfence->base, pfence->error);
-	} else if (!q->ops->reset_status(q)) {
-		int err = q->ops->suspend_wait(q);
-
-		if (err)
-			dma_fence_set_error(&pfence->base, err);
-	} else {
-		dma_fence_set_error(&pfence->base, -ENOENT);
-	}
-
-	dma_fence_signal(&pfence->base);
-	/*
-	 * Opt for keep everything in the fence critical section. This looks really strange since we
-	 * have just signalled the fence, however the preempt fences are all signalled via single
-	 * global ordered-wq, therefore anything that happens in this callback can easily block
-	 * progress on the entire wq, which itself may prevent other published preempt fences from
-	 * ever signalling.  Therefore try to keep everything here in the callback in the fence
-	 * critical section. For example if something below grabs a scary lock like vm->lock,
-	 * lockdep should complain since we also hold that lock whilst waiting on preempt fences to
-	 * complete.
-	 */
-	xe_vm_queue_rebind_worker(q->vm);
-	xe_exec_queue_put(q);
-	dma_fence_end_signalling(cookie);
+	return container_of(fence, struct xe_preempt_fence, base)->q;
 }
 
-static const char *
-preempt_fence_get_driver_name(struct dma_fence *fence)
+static int xe_preempt_fence_preempt(struct dma_fence_preempt *fence)
 {
-	return "xe";
+	struct xe_exec_queue *q = to_exec_queue(fence);
+
+	return q->ops->suspend(q);
 }
 
-static const char *
-preempt_fence_get_timeline_name(struct dma_fence *fence)
+static int xe_preempt_fence_preempt_wait(struct dma_fence_preempt *fence)
 {
-	return "preempt";
+	struct xe_exec_queue *q = to_exec_queue(fence);
+
+	return q->ops->suspend_wait(q);
 }
 
-static bool preempt_fence_enable_signaling(struct dma_fence *fence)
+static void xe_preempt_fence_preempt_finished(struct dma_fence_preempt *fence)
 {
-	struct xe_preempt_fence *pfence =
-		container_of(fence, typeof(*pfence), base);
-	struct xe_exec_queue *q = pfence->q;
+	struct xe_exec_queue *q = to_exec_queue(fence);
 
-	pfence->error = q->ops->suspend(q);
-	queue_work(q->vm->xe->preempt_fence_wq, &pfence->preempt_work);
-	return true;
+	xe_vm_queue_rebind_worker(q->vm);
+	xe_exec_queue_put(q);
 }
 
-static const struct dma_fence_ops preempt_fence_ops = {
-	.get_driver_name = preempt_fence_get_driver_name,
-	.get_timeline_name = preempt_fence_get_timeline_name,
-	.enable_signaling = preempt_fence_enable_signaling,
+static const struct dma_fence_preempt_ops xe_preempt_fence_ops = {
+	.preempt = xe_preempt_fence_preempt,
+	.preempt_wait = xe_preempt_fence_preempt_wait,
+	.preempt_finished = xe_preempt_fence_preempt_finished,
 };
 
 /**
@@ -95,7 +62,6 @@  struct xe_preempt_fence *xe_preempt_fence_alloc(void)
 		return ERR_PTR(-ENOMEM);
 
 	INIT_LIST_HEAD(&pfence->link);
-	INIT_WORK(&pfence->preempt_work, preempt_fence_work_func);
 
 	return pfence;
 }
@@ -134,11 +100,12 @@  xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
 {
 	list_del_init(&pfence->link);
 	pfence->q = xe_exec_queue_get(q);
-	spin_lock_init(&pfence->lock);
-	dma_fence_init(&pfence->base, &preempt_fence_ops,
-		      &pfence->lock, context, seqno);
 
-	return &pfence->base;
+	dma_fence_preempt_init(&pfence->base, &xe_preempt_fence_ops,
+			       xe_vm_resv(q->vm), q->vm->xe->preempt_fence_wq,
+			       context, seqno);
+
+	return &pfence->base.base;
 }
 
 /**
@@ -169,5 +136,5 @@  xe_preempt_fence_create(struct xe_exec_queue *q,
 
 bool xe_fence_is_xe_preempt(const struct dma_fence *fence)
 {
-	return fence->ops == &preempt_fence_ops;
+	return dma_fence_is_preempt(fence);
 }
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.h b/drivers/gpu/drm/xe/xe_preempt_fence.h
index 9406c6fea525..7b56d12c0786 100644
--- a/drivers/gpu/drm/xe/xe_preempt_fence.h
+++ b/drivers/gpu/drm/xe/xe_preempt_fence.h
@@ -25,7 +25,7 @@  xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
 static inline struct xe_preempt_fence *
 to_preempt_fence(struct dma_fence *fence)
 {
-	return container_of(fence, struct xe_preempt_fence, base);
+	return container_of(fence, struct xe_preempt_fence, base.base);
 }
 
 /**
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence_types.h b/drivers/gpu/drm/xe/xe_preempt_fence_types.h
index 312c3372a49f..f12b89f7dc35 100644
--- a/drivers/gpu/drm/xe/xe_preempt_fence_types.h
+++ b/drivers/gpu/drm/xe/xe_preempt_fence_types.h
@@ -6,8 +6,7 @@ 
 #ifndef _XE_PREEMPT_FENCE_TYPES_H_
 #define _XE_PREEMPT_FENCE_TYPES_H_
 
-#include <linux/dma-fence.h>
-#include <linux/workqueue.h>
+#include <linux/dma-fence-preempt.h>
 
 struct xe_exec_queue;
 
@@ -18,17 +17,11 @@  struct xe_exec_queue;
  */
 struct xe_preempt_fence {
 	/** @base: dma fence base */
-	struct dma_fence base;
+	struct dma_fence_preempt base;
 	/** @link: link into list of pending preempt fences */
 	struct list_head link;
 	/** @q: exec queue for this preempt fence */
 	struct xe_exec_queue *q;
-	/** @preempt_work: work struct which issues preemption */
-	struct work_struct preempt_work;
-	/** @lock: dma-fence fence lock */
-	spinlock_t lock;
-	/** @error: preempt fence is in error state */
-	int error;
 };
 
 #endif
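
For context on what the three callbacks replace: the common dma-fence-preempt code added earlier in this series presumably centralizes the enable_signaling/worker pattern this patch deletes from xe_preempt_fence.c. A minimal sketch of that flow, assuming hypothetical error/ops/wq/work members on struct dma_fence_preempt:

static bool dma_fence_preempt_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_preempt *pfence =
		container_of(fence, struct dma_fence_preempt, base);

	/* issue preemption without blocking, defer the wait to the wq */
	pfence->error = pfence->ops->preempt(pfence);
	queue_work(pfence->wq, &pfence->work);
	return true;
}

static void dma_fence_preempt_work_func(struct work_struct *w)
{
	struct dma_fence_preempt *pfence =
		container_of(w, struct dma_fence_preempt, work);
	int err = pfence->error ?: pfence->ops->preempt_wait(pfence);

	if (err)
		dma_fence_set_error(&pfence->base, err);
	dma_fence_signal(&pfence->base);

	/* driver cleanup after signaling (Xe: rebind VM, put exec queue) */
	pfence->ops->preempt_finished(pfence);
}

This would keep the blocking preempt_wait() on the driver-provided ordered workqueue (Xe passes its global preempt_fence_wq) and run preempt_finished() inside the same fence critical section, preserving the lockdep-visibility argument spelled out in the comment removed from preempt_fence_work_func().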