@@ -1333,11 +1333,13 @@ static void engine_record_requests(struct intel_engine_cs *engine,
static void error_record_engine_execlists(struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
{
- const struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
unsigned int n;
for (n = 0; n < execlists_num_ports(execlists); n++) {
- struct drm_i915_gem_request *rq = port_request(&execlists->port[n]);
+ struct drm_i915_gem_request *rq;
+
+ rq = port_request(execlists_port(execlists, n));
if (!rq)
break;
@@ -678,16 +678,18 @@ static void i915_guc_submit(struct intel_engine_cs *engine)
{
struct intel_guc *guc = &engine->i915->guc;
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
unsigned int n;
for (n = 0; n < execlists_num_ports(execlists); n++) {
+ struct execlist_port *port;
struct drm_i915_gem_request *rq;
unsigned int count;
- rq = port_unpack(&port[n], &count);
+ port = execlists_port(execlists, n);
+ rq = port_unpack(port, &count);
+
if (rq && count == 0) {
- port_set(&port[n], port_pack(rq, ++count));
+ port_set(port, port_pack(rq, ++count));
flush_ggtt_writes(rq->ring->vma);
@@ -710,10 +712,8 @@ static void port_assign(struct execlist_port *port,
static void i915_guc_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
+ struct execlist_port *port, *last_port;
struct drm_i915_gem_request *last = NULL;
- const struct execlist_port * const last_port =
- &execlists->port[execlists->port_mask];
bool submit = false;
struct rb_node *rb;
@@ -724,6 +724,9 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
if (!rb)
goto unlock;
+ port = execlists_port_head(execlists);
+ last_port = execlists_port_tail(execlists);
+
if (HAS_LOGICAL_RING_PREEMPTION(engine->i915) && port_isset(port)) {
struct guc_preempt_work *preempt_work =
&engine->i915->guc.preempt_work[engine->id];
@@ -739,7 +742,7 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
goto unlock;
}
- port++;
+ port = execlists_port_next(execlists, port);
}
do {
@@ -756,7 +759,8 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
if (submit)
port_assign(port, last);
- port++;
+
+ port = execlists_port_next(execlists, port);
}
INIT_LIST_HEAD(&rq->priotree.link);
@@ -784,24 +788,32 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
spin_unlock_irq(&engine->timeline->lock);
}
-static void i915_guc_irq_handler(unsigned long data)
+static void guc_complete_ready_ports(struct intel_engine_execlists * const execlists)
{
- struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
- struct drm_i915_gem_request *rq;
+ struct execlist_port *port = execlists_port_head(execlists);
+
+ while (port_isset(port)) {
+ struct drm_i915_gem_request *rq = port_request(port);
+
+ if (!i915_gem_request_completed(rq))
+ break;
- rq = port_request(&port[0]);
- while (rq && i915_gem_request_completed(rq)) {
trace_i915_gem_request_out(rq);
i915_gem_request_put(rq);
- execlists_port_complete(execlists, port);
+ port = execlists_head_complete(execlists, port);
+	}
- rq = port_request(&port[0]);
- }
- if (!rq)
+ if (!port_isset(port))
execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
+}
+
+static void i915_guc_irq_handler(unsigned long data)
+{
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ guc_complete_ready_ports(execlists);
if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) &&
intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) ==
@@ -1673,7 +1673,7 @@ static void print_request(struct drm_printer *m,
void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
{
struct intel_breadcrumbs * const b = &engine->breadcrumbs;
- const struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_gpu_error * const error = &engine->i915->gpu_error;
struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_gem_request *rq;
@@ -1777,16 +1777,20 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
rcu_read_lock();
for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
- unsigned int count;
+ struct execlist_port *port;
+ unsigned int count, idx_abs;
+
+ port = execlists_port(execlists, idx);
+ idx_abs = port_index(port, execlists);
- rq = port_unpack(&execlists->port[idx], &count);
+ rq = port_unpack(port, &count);
if (rq) {
- drm_printf(m, "\t\tELSP[%d] count=%d, ",
- idx, count);
+ drm_printf(m, "\t\tELSP[%d:%d] count=%d, ",
+ idx, idx_abs, count);
print_request(m, rq, "rq: ");
} else {
- drm_printf(m, "\t\tELSP[%d] idle\n",
- idx);
+ drm_printf(m, "\t\tELSP[%d:%d] idle\n",
+ idx, idx_abs);
}
}
drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
@@ -448,24 +448,26 @@ static inline void elsp_write(u64 desc, u32 __iomem *elsp)
static void execlists_submit_ports(struct intel_engine_cs *engine)
{
- struct execlist_port *port = engine->execlists.port;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
u32 __iomem *elsp =
engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
unsigned int n;
- for (n = execlists_num_ports(&engine->execlists); n--; ) {
+ for (n = execlists_num_ports(execlists); n--; ) {
+ struct execlist_port *port;
struct drm_i915_gem_request *rq;
unsigned int count;
u64 desc;
- rq = port_unpack(&port[n], &count);
+ port = execlists_port(execlists, n);
+ rq = port_unpack(port, &count);
if (rq) {
GEM_BUG_ON(count > !n);
if (!count++)
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
- port_set(&port[n], port_pack(rq, count));
+ port_set(port, port_pack(rq, count));
desc = execlists_update_context(rq);
- GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
+ GEM_DEBUG_EXEC(port->context_id = upper_32_bits(desc));
} else {
GEM_BUG_ON(!n);
desc = 0;
@@ -529,10 +531,8 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
- const struct execlist_port * const last_port =
- &execlists->port[execlists->port_mask];
- struct drm_i915_gem_request *last = port_request(port);
+ struct execlist_port *port, *last_port;
+ struct drm_i915_gem_request *last;
struct rb_node *rb;
bool submit = false;
@@ -563,6 +563,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (!rb)
goto unlock;
+ port = execlists_port_head(execlists);
+ last = port_request(port);
+
if (last) {
/*
* Don't resubmit or switch until all outstanding
@@ -570,7 +573,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* know the next preemption status we see corresponds
* to this ELSP update.
*/
- if (port_count(&port[0]) > 1)
+ if (port_count(port) > 1)
goto unlock;
if (HAS_LOGICAL_RING_PREEMPTION(engine->i915) &&
@@ -605,7 +608,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* the driver is unable to keep up the supply of new
* work).
*/
- if (port_count(&port[1]))
+ if (port_count(execlists_port_next(execlists, port)))
goto unlock;
/* WaIdleLiteRestore:bdw,skl
@@ -619,6 +622,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
}
}
+ last_port = execlists_port_tail(execlists);
+
do {
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
struct drm_i915_gem_request *rq, *rn;
@@ -665,7 +670,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (submit)
port_assign(port, last);
- port++;
+
+ port = execlists_port_next(execlists, port);
GEM_BUG_ON(port_isset(port));
}
@@ -699,8 +705,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
{
- struct execlist_port *port = execlists->port;
unsigned int num_ports = execlists_num_ports(execlists);
+ struct execlist_port *port;
+
+ port = execlists_port_head(execlists);
while (num_ports-- && port_isset(port)) {
struct drm_i915_gem_request *rq = port_request(port);
@@ -709,9 +717,10 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_PREEMPTED);
i915_gem_request_put(rq);
- memset(port, 0, sizeof(*port));
- port++;
+ port = execlists_head_complete(execlists, port);
}
+
+ GEM_BUG_ON(port_isset(execlists_port_head(execlists)));
}
static void execlists_cancel_requests(struct intel_engine_cs *engine)
@@ -778,7 +787,6 @@ static void intel_lrc_irq_handler(unsigned long data)
{
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port * const port = execlists->port;
struct drm_i915_private *dev_priv = engine->i915;
/* We can skip acquiring intel_runtime_pm_get() here as it was taken
@@ -797,6 +805,8 @@ static void intel_lrc_irq_handler(unsigned long data)
* new request (outside of the context-switch interrupt).
*/
while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
+ struct execlist_port *port;
+
/* The HWSP contains a (cacheable) mirror of the CSB */
const u32 *buf =
&engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
@@ -833,6 +843,8 @@ static void intel_lrc_irq_handler(unsigned long data)
tail = READ_ONCE(buf[write_idx]);
}
+ port = execlists_port_head(execlists);
+
while (head != tail) {
struct drm_i915_gem_request *rq;
unsigned int status;
@@ -895,7 +907,7 @@ static void intel_lrc_irq_handler(unsigned long data)
trace_i915_gem_request_out(rq);
i915_gem_request_put(rq);
- execlists_port_complete(execlists, port);
+ port = execlists_head_complete(execlists, port);
} else {
port_set(port, port_pack(rq, count));
}
@@ -935,6 +947,7 @@ static void insert_request(struct intel_engine_cs *engine,
static void execlists_submit_request(struct drm_i915_gem_request *request)
{
struct intel_engine_cs *engine = request->engine;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
@@ -942,7 +955,7 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
insert_request(engine, &request->priotree, request->priotree.priority);
- GEM_BUG_ON(!engine->execlists.first);
+ GEM_BUG_ON(!execlists->first);
GEM_BUG_ON(list_empty(&request->priotree.link));
spin_unlock_irqrestore(&engine->timeline->lock, flags);
@@ -259,6 +259,11 @@ struct intel_engine_execlists {
unsigned int port_mask;
/**
+ * @port_head: first used execlist port
+ */
+ unsigned int port_head;
+
+ /**
* @queue: queue of requests, in priority lists
*/
struct rb_root queue;
@@ -569,8 +574,41 @@ execlists_num_ports(const struct intel_engine_execlists * const execlists)
return execlists->port_mask + 1;
}
-static inline void
-execlists_port_complete(struct intel_engine_execlists * const execlists,
+#define __port_add(start, n, mask) (((start) + (n)) & (mask))
+#define port_head_add(e, n) __port_add((e)->port_head, n, (e)->port_mask)
+
+/* Index starting from port_head */
+static inline struct execlist_port *
+execlists_port(struct intel_engine_execlists * const execlists,
+ const unsigned int n)
+{
+ return &execlists->port[port_head_add(execlists, n)];
+}
+
+static inline struct execlist_port *
+execlists_port_head(struct intel_engine_execlists * const execlists)
+{
+ return execlists_port(execlists, 0);
+}
+
+static inline struct execlist_port *
+execlists_port_tail(struct intel_engine_execlists * const execlists)
+{
+ return execlists_port(execlists, -1);
+}
+
+static inline struct execlist_port *
+execlists_port_next(struct intel_engine_execlists * const execlists,
+ const struct execlist_port * const port)
+{
+ const unsigned int n = __port_add(port_index(port, execlists),
+ 1,
+ execlists->port_mask);
+ return &execlists->port[n];
+}
+
+static inline struct execlist_port *
+execlists_head_complete(struct intel_engine_execlists * const execlists,
struct execlist_port * const port)
{
const unsigned int m = execlists->port_mask;
@@ -580,6 +618,8 @@ execlists_port_complete(struct intel_engine_execlists * const execlists,
memmove(port, port + 1, m * sizeof(struct execlist_port));
memset(port + m, 0, sizeof(struct execlist_port));
+
+ return execlists_port_head(execlists);
}
static inline unsigned int