
[CI,1/3] drm/i915: Introduce execlist_port_* accessors

Message ID 20171130124554.21746-1-mika.kuoppala@linux.intel.com (mailing list archive)
State New, archived

Commit Message

Mika Kuoppala Nov. 30, 2017, 12:45 p.m. UTC
From: Mika Kuoppala <mika.kuoppala@intel.com>

Instead of trusting that the first available port is at index 0,
use an accessor to hide this. This is preparation for following
patches where the head can be at an arbitrary location in the
port array.
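
For context, the new accessors at the bottom of the patch boil down to
mask-based ring indexing relative to port_head. A minimal, standalone
sketch of that arithmetic follows; the helper name, the two-port mask
and the test values are illustrative only and are not part of the patch:

/* Sketch of the wrap-around indexing behind __port_add()/execlists_port():
 * an index n is taken relative to port_head and masked with port_mask,
 * where the number of ports is a power of two.
 */
#include <assert.h>

#define PORT_MASK 1u	/* two ports, i.e. port_mask = num_ports - 1 */

static unsigned int port_slot(unsigned int head, unsigned int n)
{
	return (head + n) & PORT_MASK;	/* same form as __port_add(start, n, mask) */
}

int main(void)
{
	unsigned int head = 1;	/* head may sit at any slot, not just 0 */

	assert(port_slot(head, 0) == 1);	/* execlists_port_head() */
	assert(port_slot(head, 1) == 0);	/* next port wraps around */
	assert(port_slot(head, -1) == 0);	/* execlists_port_tail() */
	return 0;
}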

v2: improved commit message, elsp_ready readability (Chris)
v3: s/execlist_port_index/execlist_port (Chris)
v4: rebase to new naming
v5: fix port_next indexing
v6: adapt to preempt
v7: improved _port_next (Chris)
v8: whitespace, for loop (Chris),
    find_first_unset and GEM_BUG_ON after next_port in guc submission

Cc: MichaƂ Winiarski <michal.winiarski@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gpu_error.c       |  5 +-
 drivers/gpu/drm/i915/intel_engine_cs.c      | 18 +++++---
 drivers/gpu/drm/i915/intel_guc_submission.c | 71 ++++++++++++++++++++---------
 drivers/gpu/drm/i915/intel_lrc.c            | 55 +++++++++++++---------
 drivers/gpu/drm/i915/intel_ringbuffer.h     | 44 +++++++++++++++++-
 5 files changed, 139 insertions(+), 54 deletions(-)

Comments

Mika Kuoppala Dec. 1, 2017, 8:47 a.m. UTC | #1
Patchwork <patchwork@emeril.freedesktop.org> writes:

> == Series Details ==
>
> Series: series starting with [CI,1/3] drm/i915: Introduce execlist_port_* accessors
> URL   : https://patchwork.freedesktop.org/series/34685/
> State : failure
>
> == Summary ==
>
> Series 34685v1 series starting with [CI,1/3] drm/i915: Introduce execlist_port_* accessors
> https://patchwork.freedesktop.org/api/1.0/series/34685/revisions/1/mbox/
>
> Test debugfs_test:
>         Subgroup read_all_entries:
>                 pass       -> DMESG-WARN (fi-skl-6260u)
>                 pass       -> DMESG-WARN (fi-skl-6600u)
>                 pass       -> DMESG-WARN (fi-skl-6700hq)
>                 pass       -> DMESG-WARN (fi-skl-6700k)
>                 pass       -> DMESG-WARN (fi-skl-6770hq)
>                 pass       -> DMESG-WARN (fi-skl-gvtdvm)
>                 pass       -> DMESG-WARN (fi-bxt-dsi)
>                 pass       -> DMESG-WARN (fi-bxt-j4205)

These are similar to:

https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_7379/fi-bxt-j4205/igt@debugfs_test@read_all_entries.html

So lockdep is not fully tamed yet?
Daniel, halp!

-Mika

>                 pass       -> DMESG-WARN (fi-kbl-7500u) fdo#103285
>                 pass       -> DMESG-WARN (fi-kbl-7560u)
>                 pass       -> DMESG-WARN (fi-kbl-7567u)
>                 pass       -> DMESG-WARN (fi-kbl-r)
>                 pass       -> DMESG-WARN (fi-glk-1)
> Test gem_exec_suspend:
>         Subgroup basic-s3:
>                 pass       -> DMESG-FAIL (fi-skl-6700k)
>         Subgroup basic-s4-devices:
>                 pass       -> SKIP       (fi-skl-6700k)
> Test gem_linear_blits:
>         Subgroup basic:
>                 pass       -> SKIP       (fi-skl-6700k)
> Test gem_render_linear_blits:
>         Subgroup basic:
>                 pass       -> SKIP       (fi-skl-6700k)
> Test gem_render_tiled_blits:
>         Subgroup basic:
>                 pass       -> SKIP       (fi-skl-6700k)
> Test gem_ringfill:
>         Subgroup basic-default:
>                 pass       -> SKIP       (fi-skl-6700k)
>         Subgroup basic-default-interruptible:
>                 pass       -> SKIP       (fi-skl-6700k)
>         Subgroup basic-default-forked:
>                 pass       -> SKIP       (fi-skl-6700k)
>         Subgroup basic-default-fd:
>                 pass       -> SKIP       (fi-skl-6700k)
>         Subgroup basic-default-hang:
>                 pass       -> DMESG-WARN (fi-elk-e7500)
>                 pass       -> SKIP       (fi-skl-6700k)
> Test gem_sync:
>         Subgroup basic-all:
>                 pass       -> SKIP       (fi-elk-e7500)
>                 pass       -> SKIP       (fi-skl-6700k)
>         Subgroup basic-each:
>                 pass       -> SKIP       (fi-elk-e7500)
>                 pass       -> SKIP       (fi-skl-6700k)
>         Subgroup basic-many-each:
>                 pass       -> SKIP       (fi-elk-e7500)
>                 pass       -> SKIP       (fi-skl-6700k)
>         Subgroup basic-store-all:
>                 pass       -> SKIP       (fi-elk-e7500)
>                 pass       -> SKIP       (fi-skl-6700k)
>         Subgroup basic-store-each:
>                 pass       -> SKIP       (fi-elk-e7500)
>                 pass       -> SKIP       (fi-skl-6700k)
> Test gem_tiled_blits:
>         Subgroup basic:
>                 pass       -> SKIP       (fi-elk-e7500)
>                 pass       -> SKIP       (fi-skl-6700k)
> Test gem_tiled_fence_blits:
>         Subgroup basic:
>                 pass       -> SKIP       (fi-elk-e7500)
>                 pass       -> SKIP       (fi-skl-6700k)
> Test gem_wait:
>         Subgroup basic-busy-all:
>                 pass       -> SKIP       (fi-elk-e7500)
>                 pass       -> SKIP       (fi-skl-6700k)
>         Subgroup basic-wait-all:
>                 pass       -> SKIP       (fi-elk-e7500)
>                 pass       -> SKIP       (fi-skl-6700k)
>         Subgroup basic-await-all:
>                 pass       -> SKIP       (fi-elk-e7500)
>                 pass       -> SKIP       (fi-skl-6700k)
> Test gem_workarounds:
>         Subgroup basic-read:
>                 pass       -> SKIP       (fi-skl-6700k)
> Test kms_busy:
>         Subgroup basic-flip-a:
>                 dmesg-warn -> SKIP       (fi-elk-e7500) fdo#103989 +3
>                 pass       -> SKIP       (fi-skl-6700k)
>         Subgroup basic-flip-b:
>                 pass       -> SKIP       (fi-skl-6700k)
>         Subgroup basic-flip-c:
>                 pass       -> SKIP       (fi-skl-6700k)
> Test kms_chamelium:
>         Subgroup common-hpd-after-suspend:
>                 pass       -> DMESG-WARN (fi-skl-6700k)
> Test kms_cursor_legacy:
>         Subgroup basic-busy-flip-before-cursor-atomic:
>                 pass       -> SKIP       (fi-skl-6700k)
>         Subgroup basic-busy-flip-before-cursor-legacy:
>                 pass       -> SKIP       (fi-skl-6700k)
> Test kms_frontbuffer_tracking:
>         Subgroup basic:
>                 pass       -> SKIP       (fi-skl-6700k) fdo#103735
> WARNING: Long output truncated
>
> 0c2cf368f9fbb26cdf4e355f01c7ddf16e65d25a drm-tip: 2017y-11m-30d-15h-54m-58s UTC integration manifest
> a39595105d3b HAX Enable GuC Submission for CI
> cf01db25c2fc drm/i915: Move execlists port head instead of memmoving array
> df93ca89010e drm/i915: Introduce execlist_port_* accessors
>
> == Logs ==
>
> For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_7379/

Patch

diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 876be8f1d930..02bae7d697da 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1350,11 +1350,12 @@  static void engine_record_requests(struct intel_engine_cs *engine,
 static void error_record_engine_execlists(struct intel_engine_cs *engine,
 					  struct drm_i915_error_engine *ee)
 {
-	const struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct intel_engine_execlists * const execlists = &engine->execlists;
 	unsigned int n;
 
 	for (n = 0; n < execlists_num_ports(execlists); n++) {
-		struct drm_i915_gem_request *rq = port_request(&execlists->port[n]);
+		struct drm_i915_gem_request *rq =
+			port_request(execlists_port(execlists, n));
 
 		if (!rq)
 			break;
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 86d4c85c8725..e36e896d2f84 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1669,7 +1669,7 @@  static void print_request(struct drm_printer *m,
 void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
 {
 	struct intel_breadcrumbs * const b = &engine->breadcrumbs;
-	const struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_gpu_error * const error = &engine->i915->gpu_error;
 	struct drm_i915_private *dev_priv = engine->i915;
 	struct drm_i915_gem_request *rq;
@@ -1782,16 +1782,20 @@  void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
 
 		rcu_read_lock();
 		for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
-			unsigned int count;
+			struct execlist_port *port;
+			unsigned int count, idx_abs;
+
+			port = execlists_port(execlists, idx);
+			idx_abs = port_index(port, execlists);
 
-			rq = port_unpack(&execlists->port[idx], &count);
+			rq = port_unpack(port, &count);
 			if (rq) {
-				drm_printf(m, "\t\tELSP[%d] count=%d, ",
-					   idx, count);
+				drm_printf(m, "\t\tELSP[%d:%d] count=%d, ",
+					   idx, idx_abs, count);
 				print_request(m, rq, "rq: ");
 			} else {
-				drm_printf(m, "\t\tELSP[%d] idle\n",
-					   idx);
+				drm_printf(m, "\t\tELSP[%d:%d] idle\n",
+					   idx, idx_abs);
 			}
 		}
 		drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 912ff143d531..615782744093 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -697,16 +697,18 @@  static void guc_submit(struct intel_engine_cs *engine)
 {
 	struct intel_guc *guc = &engine->i915->guc;
 	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct execlist_port *port = execlists->port;
 	unsigned int n;
 
 	for (n = 0; n < execlists_num_ports(execlists); n++) {
+		struct execlist_port *port;
 		struct drm_i915_gem_request *rq;
 		unsigned int count;
 
-		rq = port_unpack(&port[n], &count);
+		port = execlists_port(execlists, n);
+		rq = port_unpack(port, &count);
+
 		if (rq && count == 0) {
-			port_set(&port[n], port_pack(rq, ++count));
+			port_set(port, port_pack(rq, ++count));
 
 			flush_ggtt_writes(rq->ring->vma);
 
@@ -715,6 +717,22 @@  static void guc_submit(struct intel_engine_cs *engine)
 	}
 }
 
+static struct execlist_port *
+port_find_first_unset(struct intel_engine_execlists * const execlists,
+		      struct execlist_port * const prev)
+{
+	struct execlist_port *port = execlists_port_next(execlists, prev);
+
+	while (port != prev) {
+		if (!port_isset(port))
+			return port;
+
+		port = execlists_port_next(execlists, port);
+	}
+
+	return NULL;
+}
+
 static void port_assign(struct execlist_port *port,
 			struct drm_i915_gem_request *rq)
 {
@@ -726,10 +744,8 @@  static void port_assign(struct execlist_port *port,
 static void guc_dequeue(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct execlist_port *port = execlists->port;
+	struct execlist_port *port, *last_port;
 	struct drm_i915_gem_request *last = NULL;
-	const struct execlist_port * const last_port =
-		&execlists->port[execlists->port_mask];
 	bool submit = false;
 	struct rb_node *rb;
 
@@ -740,6 +756,9 @@  static void guc_dequeue(struct intel_engine_cs *engine)
 	if (!rb)
 		goto unlock;
 
+	port = execlists_port_head(execlists);
+	last_port = execlists_port_tail(execlists);
+
 	if (port_isset(port)) {
 		if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
 			struct guc_preempt_work *preempt_work =
@@ -755,8 +774,8 @@  static void guc_dequeue(struct intel_engine_cs *engine)
 			}
 		}
 
-		port++;
-		if (port_isset(port))
+		port = port_find_first_unset(execlists, port);
+		if (!port)
 			goto unlock;
 	}
 	GEM_BUG_ON(port_isset(port));
@@ -775,7 +794,9 @@  static void guc_dequeue(struct intel_engine_cs *engine)
 
 				if (submit)
 					port_assign(port, last);
-				port++;
+
+				port = execlists_port_next(execlists, port);
+				GEM_BUG_ON(port_isset(port));
 			}
 
 			INIT_LIST_HEAD(&rq->priotree.link);
@@ -804,29 +825,37 @@  static void guc_dequeue(struct intel_engine_cs *engine)
 	spin_unlock_irq(&engine->timeline->lock);
 }
 
-static void guc_submission_tasklet(unsigned long data)
+static void guc_complete_ready_ports(struct intel_engine_execlists *execlists)
 {
-	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
-	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct execlist_port *port = execlists->port;
-	struct drm_i915_gem_request *rq;
+	struct execlist_port *port = execlists_port_head(execlists);
+
+	while (port_isset(port)) {
+		struct drm_i915_gem_request *rq = port_request(port);
+
+		if (!i915_gem_request_completed(rq))
+			break;
 
-	rq = port_request(&port[0]);
-	while (rq && i915_gem_request_completed(rq)) {
 		trace_i915_gem_request_out(rq);
 		i915_gem_request_put(rq);
 
-		execlists_port_complete(execlists, port);
+		port = execlists_head_complete(execlists, port);
+	};
 
-		rq = port_request(&port[0]);
-	}
-	if (!rq)
+	if (!port_isset(port))
 		execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
+}
+
+static void guc_submission_tasklet(unsigned long data)
+{
+	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+
+	guc_complete_ready_ports(execlists);
 
 	if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) &&
 	    intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) ==
 	    GUC_PREEMPT_FINISHED) {
-		execlists_cancel_port_requests(&engine->execlists);
+		execlists_cancel_port_requests(execlists);
 		execlists_unwind_incomplete_requests(execlists);
 
 		wait_for_guc_preempt_report(engine);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 2a8160f603ab..bc839729a78c 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -430,24 +430,27 @@  static inline void elsp_write(u64 desc, u32 __iomem *elsp)
 
 static void execlists_submit_ports(struct intel_engine_cs *engine)
 {
-	struct execlist_port *port = engine->execlists.port;
+	struct intel_engine_execlists * const execlists = &engine->execlists;
 	u32 __iomem *elsp =
 		engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
 	unsigned int n;
 
-	for (n = execlists_num_ports(&engine->execlists); n--; ) {
+	for (n = execlists_num_ports(execlists); n--; ) {
+		struct execlist_port *port;
 		struct drm_i915_gem_request *rq;
 		unsigned int count;
 		u64 desc;
 
-		rq = port_unpack(&port[n], &count);
+		port = execlists_port(execlists, n);
+		rq = port_unpack(port, &count);
 		if (rq) {
 			GEM_BUG_ON(count > !n);
 			if (!count++)
 				execlists_context_schedule_in(rq);
-			port_set(&port[n], port_pack(rq, count));
+
+			port_set(port, port_pack(rq, count));
 			desc = execlists_update_context(rq);
-			GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
+			GEM_DEBUG_EXEC(port->context_id = upper_32_bits(desc));
 
 			GEM_TRACE("%s in[%d]:  ctx=%d.%d, seqno=%x\n",
 				  engine->name, n,
@@ -519,10 +522,8 @@  static void inject_preempt_context(struct intel_engine_cs *engine)
 static void execlists_dequeue(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct execlist_port *port = execlists->port;
-	const struct execlist_port * const last_port =
-		&execlists->port[execlists->port_mask];
-	struct drm_i915_gem_request *last = port_request(port);
+	struct execlist_port *port, *last_port;
+	struct drm_i915_gem_request *last;
 	struct rb_node *rb;
 	bool submit = false;
 
@@ -553,6 +554,9 @@  static void execlists_dequeue(struct intel_engine_cs *engine)
 	if (!rb)
 		goto unlock;
 
+	port = execlists_port_head(execlists);
+	last = port_request(port);
+
 	if (last) {
 		/*
 		 * Don't resubmit or switch until all outstanding
@@ -560,8 +564,8 @@  static void execlists_dequeue(struct intel_engine_cs *engine)
 		 * know the next preemption status we see corresponds
 		 * to this ELSP update.
 		 */
-		GEM_BUG_ON(!port_count(&port[0]));
-		if (port_count(&port[0]) > 1)
+		GEM_BUG_ON(!port_count(port));
+		if (port_count(port) > 1)
 			goto unlock;
 
 		/*
@@ -606,7 +610,7 @@  static void execlists_dequeue(struct intel_engine_cs *engine)
 			 * the driver is unable to keep up the supply of new
 			 * work).
 			 */
-			if (port_count(&port[1]))
+			if (port_count(execlists_port_next(execlists, port)))
 				goto unlock;
 
 			/* WaIdleLiteRestore:bdw,skl
@@ -620,6 +624,8 @@  static void execlists_dequeue(struct intel_engine_cs *engine)
 		}
 	}
 
+	last_port = execlists_port_tail(execlists);
+
 	do {
 		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
 		struct drm_i915_gem_request *rq, *rn;
@@ -666,8 +672,8 @@  static void execlists_dequeue(struct intel_engine_cs *engine)
 
 				if (submit)
 					port_assign(port, last);
-				port++;
 
+				port = execlists_port_next(execlists, port);
 				GEM_BUG_ON(port_isset(port));
 			}
 
@@ -700,20 +706,21 @@  static void execlists_dequeue(struct intel_engine_cs *engine)
 void
 execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 {
-	struct execlist_port *port = execlists->port;
 	unsigned int num_ports = execlists_num_ports(execlists);
+	struct execlist_port *port;
 
-	while (num_ports-- && port_isset(port)) {
+	for (port = execlists_port_head(execlists);
+	     num_ports-- && port_isset(port);
+	     port = execlists_head_complete(execlists, port)) {
 		struct drm_i915_gem_request *rq = port_request(port);
 
 		GEM_BUG_ON(!execlists->active);
 		intel_engine_context_out(rq->engine);
 		execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_PREEMPTED);
 		i915_gem_request_put(rq);
-
-		memset(port, 0, sizeof(*port));
-		port++;
 	}
+
+	GEM_BUG_ON(port_isset(execlists_port_head(execlists)));
 }
 
 static void execlists_cancel_requests(struct intel_engine_cs *engine)
@@ -780,7 +787,6 @@  static void execlists_submission_tasklet(unsigned long data)
 {
 	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
 	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct execlist_port * const port = execlists->port;
 	struct drm_i915_private *dev_priv = engine->i915;
 
 	/* We can skip acquiring intel_runtime_pm_get() here as it was taken
@@ -799,6 +805,8 @@  static void execlists_submission_tasklet(unsigned long data)
 	 * new request (outside of the context-switch interrupt).
 	 */
 	while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
+		struct execlist_port *port;
+
 		/* The HWSP contains a (cacheable) mirror of the CSB */
 		const u32 *buf =
 			&engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
@@ -839,6 +847,8 @@  static void execlists_submission_tasklet(unsigned long data)
 			  head, GEN8_CSB_READ_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))),
 			  tail, GEN8_CSB_WRITE_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))));
 
+		port = execlists_port_head(execlists);
+
 		while (head != tail) {
 			struct drm_i915_gem_request *rq;
 			unsigned int status;
@@ -914,14 +924,14 @@  static void execlists_submission_tasklet(unsigned long data)
 			GEM_BUG_ON(count == 0);
 			if (--count == 0) {
 				GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
-				GEM_BUG_ON(port_isset(&port[1]) &&
+				GEM_BUG_ON(port_isset(execlists_port(execlists, 1)) &&
 					   !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
 				GEM_BUG_ON(!i915_gem_request_completed(rq));
 				execlists_context_schedule_out(rq);
 				trace_i915_gem_request_out(rq);
 				i915_gem_request_put(rq);
 
-				execlists_port_complete(execlists, port);
+				port = execlists_head_complete(execlists, port);
 			} else {
 				port_set(port, port_pack(rq, count));
 			}
@@ -961,6 +971,7 @@  static void insert_request(struct intel_engine_cs *engine,
 static void execlists_submit_request(struct drm_i915_gem_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
+	struct intel_engine_execlists * const execlists = &engine->execlists;
 	unsigned long flags;
 
 	/* Will be called from irq-context when using foreign fences. */
@@ -968,7 +979,7 @@  static void execlists_submit_request(struct drm_i915_gem_request *request)
 
 	insert_request(engine, &request->priotree, request->priotree.priority);
 
-	GEM_BUG_ON(!engine->execlists.first);
+	GEM_BUG_ON(!execlists->first);
 	GEM_BUG_ON(list_empty(&request->priotree.link));
 
 	spin_unlock_irqrestore(&engine->timeline->lock, flags);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c68ab3ead83c..17f1fb4ded89 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -251,6 +251,11 @@  struct intel_engine_execlists {
 	unsigned int port_mask;
 
 	/**
+	 * @port_head: first used execlist port
+	 */
+	unsigned int port_head;
+
+	/**
 	 * @queue: queue of requests, in priority lists
 	 */
 	struct rb_root queue;
@@ -643,8 +648,41 @@  execlists_num_ports(const struct intel_engine_execlists * const execlists)
 	return execlists->port_mask + 1;
 }
 
-static inline void
-execlists_port_complete(struct intel_engine_execlists * const execlists,
+#define __port_add(start, n, mask) (((start) + (n)) & (mask))
+#define port_head_add(e, n) __port_add((e)->port_head, n, (e)->port_mask)
+
+/* Index starting from port_head */
+static inline struct execlist_port *
+execlists_port(struct intel_engine_execlists * const execlists,
+	       const unsigned int n)
+{
+	return &execlists->port[port_head_add(execlists, n)];
+}
+
+static inline struct execlist_port *
+execlists_port_head(struct intel_engine_execlists * const execlists)
+{
+	return execlists_port(execlists, 0);
+}
+
+static inline struct execlist_port *
+execlists_port_tail(struct intel_engine_execlists * const execlists)
+{
+	return execlists_port(execlists, -1);
+}
+
+static inline struct execlist_port *
+execlists_port_next(struct intel_engine_execlists * const execlists,
+		    struct execlist_port *port)
+{
+	if (port++ == execlists->port + execlists->port_mask)
+		port = execlists->port;
+
+	return port;
+}
+
+static inline struct execlist_port *
+execlists_head_complete(struct intel_engine_execlists * const execlists,
 			struct execlist_port * const port)
 {
 	const unsigned int m = execlists->port_mask;
@@ -654,6 +692,8 @@  execlists_port_complete(struct intel_engine_execlists * const execlists,
 
 	memmove(port, port + 1, m * sizeof(struct execlist_port));
 	memset(port + m, 0, sizeof(struct execlist_port));
+
+	return execlists_port_head(execlists);
 }
 
 static inline unsigned int