
[v2,11/12] drm/i915/guc: Preemption! With GuC

Message ID 20171009145258.23303-12-michal.winiarski@intel.com (mailing list archive)
State New, archived

Commit Message

Michał Winiarski Oct. 9, 2017, 2:52 p.m. UTC
Pretty similar to what we have on execlists.
We're reusing most of the GEM code; however, due to GuC quirks we need a
couple of extra bits.
Preemption is implemented as a GuC action, and actions can be pretty slow.
Because of that, we're using a mutex to serialize them. Since we're
requesting preemption from the tasklet, the task of creating a workitem
and wrapping it in a GuC action is delegated to a worker.

To detect that preemption has finished, we're using an additional piece
of the HWSP, and since we're not getting context switch interrupts,
we're also adding a user interrupt.

The fact that our special preempt context has completed unfortunately
doesn't mean that we're ready to submit new work. We also need to wait
for GuC to finish its own processing.

v2: Don't compile out the wait for GuC, handle workqueue flush on reset,
no need for ordered workqueue, put on a reviewer hat when looking at my own
patches (Chris)
Move struct work around in intel_guc, move user interrupt outside of
conditional (Michał)
Keep ring around rather than chase through intel_context

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jeff McGee <jeff.mcgee@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.c            |   3 +-
 drivers/gpu/drm/i915/i915_gem.c            |  10 ++
 drivers/gpu/drm/i915/i915_guc_submission.c | 141 +++++++++++++++++++++++++++--
 drivers/gpu/drm/i915/intel_guc.h           |   2 +
 drivers/gpu/drm/i915/intel_lrc.c           |   4 +-
 drivers/gpu/drm/i915/intel_lrc.h           |   1 -
 drivers/gpu/drm/i915/intel_ringbuffer.h    |   9 ++
 7 files changed, 156 insertions(+), 14 deletions(-)

Comments

Chris Wilson Oct. 9, 2017, 8:32 p.m. UTC | #1
Quoting Michał Winiarski (2017-10-09 15:52:57)
> diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
> index 947af576563b..418451755145 100644
> --- a/drivers/gpu/drm/i915/i915_guc_submission.c
> +++ b/drivers/gpu/drm/i915/i915_guc_submission.c
> @@ -558,6 +558,89 @@ static void guc_add_request(struct intel_guc *guc,
>         spin_unlock(&client->wq_lock);
>  }
>  
> +#define GUC_PREEMPT_FINISHED 0x1
> +#define GUC_PREEMPT_BREADCRUMB_DWORDS 0x8
> +static void inject_preempt_context(struct work_struct *work)
> +{
> +       struct intel_engine_cs *engine =
> +               container_of(work, typeof(*engine), guc_preempt_work);
> +       struct drm_i915_private *dev_priv = engine->i915;
> +       struct intel_guc *guc = &dev_priv->guc;
> +       struct i915_guc_client *client = guc->client[PREEMPT];
> +       struct intel_ring *ring = client->owner->engine[engine->id].ring;
> +       u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(client->owner,
> +                                                                engine));
> +       u32 *cs = ring->vaddr + ring->tail;
> +       u32 data[7];
> +
> +       if (engine->id == RCS) {
> +               cs = gen8_emit_ggtt_write_rcs(cs, GUC_PREEMPT_FINISHED,
> +                               intel_hws_preempt_done_address(engine));
> +       } else {
> +               cs = gen8_emit_ggtt_write(cs, GUC_PREEMPT_FINISHED,
> +                               intel_hws_preempt_done_address(engine));
> +               *cs++ = MI_NOOP;
> +               *cs++ = MI_NOOP;
> +       }
> +       *cs++ = MI_USER_INTERRUPT;
> +       *cs++ = MI_NOOP;
> +
> +       GEM_BUG_ON(!IS_ALIGNED(ring->size,
> +                              GUC_PREEMPT_BREADCRUMB_DWORDS * sizeof(u32)));
> +       GEM_BUG_ON((void*)cs - (ring->vaddr + ring->tail) !=
> +                  GUC_PREEMPT_BREADCRUMB_DWORDS * sizeof(u32));
> +
> +       ring->tail += GUC_PREEMPT_BREADCRUMB_DWORDS * sizeof(u32);
> +       ring->tail &= (ring->size - 1);
> +
> +       if (i915_vma_is_map_and_fenceable(ring->vma))
> +               POSTING_READ_FW(GUC_STATUS);
> +
> +       spin_lock_irq(&client->wq_lock);
> +       guc_wq_item_append(client, engine->guc_id, ctx_desc,
> +                          ring->tail / sizeof(u64), 0);
> +       spin_unlock_irq(&client->wq_lock);
> +
> +       data[0] = INTEL_GUC_ACTION_REQUEST_PREEMPTION;
> +       data[1] = client->stage_id;
> +       data[2] = INTEL_GUC_PREEMPT_OPTION_IMMEDIATE |
> +                 INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q |
> +                 INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q;
> +       data[3] = engine->guc_id;
> +       data[4] = guc->client[SUBMIT]->priority;
> +       data[5] = guc->client[SUBMIT]->stage_id;
> +       data[6] = guc_ggtt_offset(guc->shared_data);
> +
> +       if (WARN_ON(intel_guc_send(guc, data, ARRAY_SIZE(data)))) {

Not speaking from experience or anything... But if this fails once, it's
likely to keep failing! Ergo WARN_ON_ONCE.

Broxton:
[   14.217681] [drm] INTEL_GUC_SEND: Action 0x2 failed; ret=-5 status=0xF000F000 response=0x00000010

Helpful?
-Chris
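
Applied to the hunk above, the suggested change (our reading of the
review note, not a snippet posted in the thread) would simply swap the
macro:

	if (WARN_ON_ONCE(intel_guc_send(guc, data, ARRAY_SIZE(data)))) {
		WRITE_ONCE(engine->execlists.preempt, false);
		tasklet_schedule(&engine->execlists.irq_tasklet);
	}

WARN_ON_ONCE() still returns the condition, so the error path (clearing
the preempt flag and rescheduling the tasklet) stays the same; only the
repeated backtraces on every failed send are suppressed.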

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 78120a5d40f9..1d356f5c252c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -373,8 +373,7 @@  static int i915_getparam(struct drm_device *dev, void *data,
 			value |= I915_SCHEDULER_CAP_PRIORITY;
 
 			if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
-			    i915_modparams.enable_execlists &&
-			    !i915_modparams.enable_guc_submission)
+			    i915_modparams.enable_execlists)
 				value |= I915_SCHEDULER_CAP_PREEMPTION;
 		}
 		break;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6c9f0a151d0f..e7d1d91cf8a4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2854,6 +2854,16 @@  i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
 	tasklet_kill(&engine->execlists.irq_tasklet);
 	tasklet_disable(&engine->execlists.irq_tasklet);
 
+	/*
+	 * We're using worker to queue preemption requests from the tasklet in
+	 * GuC submission mode.
+	 * Even though tasklet was disabled, we may still have a worker queued.
+	 * Let's make sure that all workers scheduled before disabling the
+	 * tasklet are completed before continuing with the reset.
+	 */
+	if (i915_modparams.enable_guc_submission)
+		flush_workqueue(engine->i915->guc.preempt_wq);
+
 	if (engine->irq_seqno_barrier)
 		engine->irq_seqno_barrier(engine);
 
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 947af576563b..418451755145 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -558,6 +558,89 @@  static void guc_add_request(struct intel_guc *guc,
 	spin_unlock(&client->wq_lock);
 }
 
+#define GUC_PREEMPT_FINISHED 0x1
+#define GUC_PREEMPT_BREADCRUMB_DWORDS 0x8
+static void inject_preempt_context(struct work_struct *work)
+{
+	struct intel_engine_cs *engine =
+		container_of(work, typeof(*engine), guc_preempt_work);
+	struct drm_i915_private *dev_priv = engine->i915;
+	struct intel_guc *guc = &dev_priv->guc;
+	struct i915_guc_client *client = guc->client[PREEMPT];
+	struct intel_ring *ring = client->owner->engine[engine->id].ring;
+	u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(client->owner,
+								 engine));
+	u32 *cs = ring->vaddr + ring->tail;
+	u32 data[7];
+
+	if (engine->id == RCS) {
+		cs = gen8_emit_ggtt_write_rcs(cs, GUC_PREEMPT_FINISHED,
+				intel_hws_preempt_done_address(engine));
+	} else {
+		cs = gen8_emit_ggtt_write(cs, GUC_PREEMPT_FINISHED,
+				intel_hws_preempt_done_address(engine));
+		*cs++ = MI_NOOP;
+		*cs++ = MI_NOOP;
+	}
+	*cs++ = MI_USER_INTERRUPT;
+	*cs++ = MI_NOOP;
+
+	GEM_BUG_ON(!IS_ALIGNED(ring->size,
+			       GUC_PREEMPT_BREADCRUMB_DWORDS * sizeof(u32)));
+	GEM_BUG_ON((void*)cs - (ring->vaddr + ring->tail) !=
+		   GUC_PREEMPT_BREADCRUMB_DWORDS * sizeof(u32));
+
+	ring->tail += GUC_PREEMPT_BREADCRUMB_DWORDS * sizeof(u32);
+	ring->tail &= (ring->size - 1);
+
+	if (i915_vma_is_map_and_fenceable(ring->vma))
+		POSTING_READ_FW(GUC_STATUS);
+
+	spin_lock_irq(&client->wq_lock);
+	guc_wq_item_append(client, engine->guc_id, ctx_desc,
+			   ring->tail / sizeof(u64), 0);
+	spin_unlock_irq(&client->wq_lock);
+
+	data[0] = INTEL_GUC_ACTION_REQUEST_PREEMPTION;
+	data[1] = client->stage_id;
+	data[2] = INTEL_GUC_PREEMPT_OPTION_IMMEDIATE |
+		  INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q |
+		  INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q;
+	data[3] = engine->guc_id;
+	data[4] = guc->client[SUBMIT]->priority;
+	data[5] = guc->client[SUBMIT]->stage_id;
+	data[6] = guc_ggtt_offset(guc->shared_data);
+
+	if (WARN_ON(intel_guc_send(guc, data, ARRAY_SIZE(data)))) {
+		WRITE_ONCE(engine->execlists.preempt, false);
+		tasklet_schedule(&engine->execlists.irq_tasklet);
+	}
+}
+
+/*
+ * We're using user interrupt and HWSP value to mark that preemption has
+ * finished and GPU is idle. Normally, we could unwind and continue similar to
+ * execlists submission path. Unfortunately, with GuC we also need to wait for
+ * it to finish its own postprocessing, before attempting to submit. Otherwise
+ * GuC may silently ignore our submissions, and thus we risk losing request at
+ * best, executing out-of-order and causing kernel panic at worst.
+ */
+#define GUC_PREEMPT_POSTPROCESS_DELAY_MS 10
+static void wait_for_guc_preempt_report(struct intel_engine_cs *engine)
+{
+	struct intel_guc *guc = &engine->i915->guc;
+	struct guc_shared_ctx_data *data = guc->shared_data_vaddr;
+	struct guc_ctx_report *report = &data->preempt_ctx_report[engine->guc_id];
+
+	WARN_ON(wait_for_atomic(report->report_return_status ==
+				INTEL_GUC_REPORT_STATUS_COMPLETE,
+				GUC_PREEMPT_POSTPROCESS_DELAY_MS));
+	/* GuC is expecting that we're also going to clear the affected context
+	 * counter */
+	report->affected_count = 0;
+}
+
+
 /**
  * i915_guc_submit() - Submit commands through GuC
  * @engine: engine associated with the commands
@@ -634,13 +717,28 @@  static void i915_guc_dequeue(struct intel_engine_cs *engine)
 	bool submit = false;
 	struct rb_node *rb;
 
-	if (port_isset(port))
-		port++;
-
 	spin_lock_irq(&engine->timeline->lock);
 	rb = execlists->first;
 	GEM_BUG_ON(rb_first(&execlists->queue) != rb);
-	while (rb) {
+
+	if (!rb)
+		goto unlock;
+
+	if (HAS_LOGICAL_RING_PREEMPTION(engine->i915) && port_isset(port)) {
+		if (rb_entry(rb, struct i915_priolist, node)->priority >
+		    max(port_request(port)->priotree.priority, 0)) {
+			WRITE_ONCE(execlists->preempt, true);
+			queue_work(engine->i915->guc.preempt_wq,
+				   &engine->guc_preempt_work);
+			goto unlock;
+		} else if (port_isset(last_port)) {
+			goto unlock;
+		}
+
+		port++;
+	}
+
+	do {
 		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
 		struct drm_i915_gem_request *rq, *rn;
 
@@ -670,13 +768,14 @@  static void i915_guc_dequeue(struct intel_engine_cs *engine)
 		INIT_LIST_HEAD(&p->requests);
 		if (p->priority != I915_PRIORITY_NORMAL)
 			kmem_cache_free(engine->i915->priorities, p);
-	}
+	} while (rb);
 done:
 	execlists->first = rb;
 	if (submit) {
 		port_assign(port, last);
 		i915_guc_submit(engine);
 	}
+unlock:
 	spin_unlock_irq(&engine->timeline->lock);
 }
 
@@ -685,10 +784,23 @@  static void i915_guc_irq_handler(unsigned long data)
 	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct execlist_port *port = execlists->port;
-	const struct execlist_port * const last_port =
-		&execlists->port[execlists->port_mask];
 	struct drm_i915_gem_request *rq;
 
+	if (READ_ONCE(execlists->preempt) &&
+	    intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) ==
+	    GUC_PREEMPT_FINISHED) {
+		execlists_cancel_port_requests(&engine->execlists);
+
+		spin_lock_irq(&engine->timeline->lock);
+		intel_engine_unwind_incomplete_requests(engine);
+		spin_unlock_irq(&engine->timeline->lock);
+
+		wait_for_guc_preempt_report(engine);
+
+		WRITE_ONCE(execlists->preempt, false);
+		intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
+	}
+
 	rq = port_request(&port[0]);
 	while (rq && i915_gem_request_completed(rq)) {
 		trace_i915_gem_request_out(rq);
@@ -700,7 +812,7 @@  static void i915_guc_irq_handler(unsigned long data)
 		rq = port_request(&port[0]);
 	}
 
-	if (!port_isset(last_port))
+	if (!READ_ONCE(execlists->preempt))
 		i915_guc_dequeue(engine);
 }
 
@@ -1096,12 +1208,21 @@  int i915_guc_submission_init(struct drm_i915_private *dev_priv)
 	if (ret < 0)
 		goto err_shared_data;
 
+	guc->preempt_wq = alloc_workqueue("i915-guc_preempt", WQ_HIGHPRI,
+					  I915_NUM_ENGINES);
+	if (!guc->preempt_wq) {
+		ret = -ENOMEM;
+		goto err_log;
+	}
+
 	ret = guc_ads_create(guc);
 	if (ret < 0)
-		goto err_log;
+		goto err_wq;
 
 	return 0;
 
+err_wq:
+	destroy_workqueue(guc->preempt_wq);
 err_log:
 	intel_guc_log_destroy(guc);
 err_shared_data:
@@ -1116,6 +1237,7 @@  void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
 	struct intel_guc *guc = &dev_priv->guc;
 
 	guc_ads_destroy(guc);
+	destroy_workqueue(guc->preempt_wq);
 	intel_guc_log_destroy(guc);
 	guc_shared_data_destroy(guc);
 	guc_stage_desc_pool_destroy(guc);
@@ -1237,6 +1359,7 @@  int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
 	for_each_engine(engine, dev_priv, id) {
 		struct intel_engine_execlists * const execlists = &engine->execlists;
 		execlists->irq_tasklet.func = i915_guc_irq_handler;
+		INIT_WORK(&engine->guc_preempt_work, inject_preempt_context);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index c20ed99cbda0..8af6d6579b18 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -59,6 +59,8 @@  struct intel_guc {
 
 	struct i915_guc_client *client[I915_GUC_NUM_CLIENTS];
 
+	struct workqueue_struct *preempt_wq;
+
 	DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
 	/* Cyclic counter mod pagesize	*/
 	u32 db_cacheline;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 4f9f12e3c7f6..f5fc41c5c8b3 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -354,7 +354,7 @@  static void unwind_wa_tail(struct drm_i915_gem_request *rq)
 	assert_ring_tail_valid(rq->ring, rq->tail);
 }
 
-static void intel_engine_unwind_incomplete_requests(struct intel_engine_cs *engine)
+void intel_engine_unwind_incomplete_requests(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *rq, *rn;
 	struct i915_priolist *uninitialized_var(p);
@@ -682,7 +682,7 @@  static void execlists_dequeue(struct intel_engine_cs *engine)
 		execlists_submit_ports(engine);
 }
 
-static void
+void
 execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 {
 	struct execlist_port *port = execlists->port;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 689fde1a63a9..17182ce29674 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -107,7 +107,6 @@  intel_lr_context_descriptor(struct i915_gem_context *ctx,
 	return ctx->engine[engine->id].lrc_desc;
 }
 
-
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
 				    int enable_execlists);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 7777a05bfff5..76d0dae1526b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -498,6 +498,8 @@  struct intel_engine_cs {
 
 	bool needs_cmd_parser;
 
+	struct work_struct guc_preempt_work;
+
 	/*
 	 * Table of commands the command parser needs to know about
 	 * for this engine.
@@ -523,6 +525,9 @@  struct intel_engine_cs {
 	u32 (*get_cmd_length_mask)(u32 cmd_header);
 };
 
+void
+execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);
+
 static inline unsigned int
 execlists_num_ports(const struct intel_engine_execlists * const execlists)
 {
@@ -541,6 +546,8 @@  execlists_port_complete(struct intel_engine_execlists * const execlists,
 	memset(port + m, 0, sizeof(struct execlist_port));
 }
 
+
+
 static inline unsigned int
 intel_engine_flag(const struct intel_engine_cs *engine)
 {
@@ -708,6 +715,8 @@  int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
 u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
 u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);
 
+void intel_engine_unwind_incomplete_requests(struct intel_engine_cs *engine);
+
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
 {
 	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);