@@ -1350,11 +1350,12 @@ static void engine_record_requests(struct intel_engine_cs *engine,
static void error_record_engine_execlists(struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
{
- const struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
unsigned int n;
for (n = 0; n < execlists_num_ports(execlists); n++) {
- struct drm_i915_gem_request *rq = port_request(&execlists->port[n]);
+ struct drm_i915_gem_request *rq =
+ port_request(execlists_port(execlists, n));
if (!rq)
break;
@@ -1667,7 +1667,7 @@ static void print_request(struct drm_printer *m,
void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
{
struct intel_breadcrumbs * const b = &engine->breadcrumbs;
- const struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_gpu_error * const error = &engine->i915->gpu_error;
struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_gem_request *rq;
@@ -1780,16 +1780,20 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
rcu_read_lock();
for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
- unsigned int count;
+ struct execlist_port *port;
+ unsigned int count, idx_abs;
+
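+ /* idx counts from port_head; idx_abs is the raw offset into the array */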
+ port = execlists_port(execlists, idx);
+ idx_abs = port_index(port, execlists);
- rq = port_unpack(&execlists->port[idx], &count);
+ rq = port_unpack(port, &count);
if (rq) {
- drm_printf(m, "\t\tELSP[%d] count=%d, ",
- idx, count);
+ drm_printf(m, "\t\tELSP[%d:%d] count=%d, ",
+ idx, idx_abs, count);
print_request(m, rq, "rq: ");
} else {
- drm_printf(m, "\t\tELSP[%d] idle\n",
- idx);
+ drm_printf(m, "\t\tELSP[%d:%d] idle\n",
+ idx, idx_abs);
}
}
drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
@@ -697,16 +697,18 @@ static void guc_submit(struct intel_engine_cs *engine)
{
struct intel_guc *guc = &engine->i915->guc;
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
unsigned int n;
for (n = 0; n < execlists_num_ports(execlists); n++) {
+ struct execlist_port *port;
struct drm_i915_gem_request *rq;
unsigned int count;
- rq = port_unpack(&port[n], &count);
+ port = execlists_port(execlists, n);
+ rq = port_unpack(port, &count);
+
if (rq && count == 0) {
- port_set(&port[n], port_pack(rq, ++count));
+ port_set(port, port_pack(rq, ++count));
flush_ggtt_writes(rq->ring->vma);
@@ -715,6 +717,22 @@ static void guc_submit(struct intel_engine_cs *engine)
}
}
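+/*
+ * Walk the port ring starting after @prev and return the first port
+ * with no request assigned, or NULL if all other ports are busy.
+ */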
+static struct execlist_port *
+port_find_first_unset(struct intel_engine_execlists * const execlists,
+ struct execlist_port * const prev)
+{
+ struct execlist_port *port = execlists_port_next(execlists, prev);
+
+ while (port != prev) {
+ if (!port_isset(port))
+ return port;
+
+ port = execlists_port_next(execlists, port);
+ }
+
+ return NULL;
+}
+
static void port_assign(struct execlist_port *port,
struct drm_i915_gem_request *rq)
{
@@ -726,10 +744,8 @@ static void port_assign(struct execlist_port *port,
static void guc_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
+ struct execlist_port *port, *last_port;
struct drm_i915_gem_request *last = NULL;
- const struct execlist_port * const last_port =
- &execlists->port[execlists->port_mask];
bool submit = false;
struct rb_node *rb;
@@ -740,6 +756,9 @@ static void guc_dequeue(struct intel_engine_cs *engine)
if (!rb)
goto unlock;
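+ /* Work relative to the current head of the port ring */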
+ port = execlists_port_head(execlists);
+ last_port = execlists_port_tail(execlists);
+
if (port_isset(port)) {
if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
struct guc_preempt_work *preempt_work =
@@ -755,8 +774,8 @@ static void guc_dequeue(struct intel_engine_cs *engine)
}
}
- port++;
- if (port_isset(port))
+ port = port_find_first_unset(execlists, port);
+ if (!port)
goto unlock;
}
GEM_BUG_ON(port_isset(port));
@@ -775,7 +794,9 @@ static void guc_dequeue(struct intel_engine_cs *engine)
if (submit)
port_assign(port, last);
- port++;
+
+ port = execlists_port_next(execlists, port);
+ GEM_BUG_ON(port_isset(port));
}
INIT_LIST_HEAD(&rq->priotree.link);
@@ -804,29 +825,37 @@ static void guc_dequeue(struct intel_engine_cs *engine)
spin_unlock_irq(&engine->timeline->lock);
}
-static void guc_submission_tasklet(unsigned long data)
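+/*
+ * Retire completed requests from the head of the port ring, stopping at
+ * the first request still in flight.
+ */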
+static void guc_complete_ready_ports(struct intel_engine_execlists *execlists)
{
- struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
- struct drm_i915_gem_request *rq;
+ struct execlist_port *port = execlists_port_head(execlists);
+
+ while (port_isset(port)) {
+ struct drm_i915_gem_request *rq = port_request(port);
+
+ if (!i915_gem_request_completed(rq))
+ break;
- rq = port_request(&port[0]);
- while (rq && i915_gem_request_completed(rq)) {
trace_i915_gem_request_out(rq);
i915_gem_request_put(rq);
- execlists_port_complete(execlists, port);
+ port = execlists_head_complete(execlists, port);
+ }
- rq = port_request(&port[0]);
- }
- if (!rq)
+ if (!port_isset(port))
execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
+}
+
+static void guc_submission_tasklet(unsigned long data)
+{
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ guc_complete_ready_ports(execlists);
if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) &&
intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) ==
GUC_PREEMPT_FINISHED) {
- execlists_cancel_port_requests(&engine->execlists);
+ execlists_cancel_port_requests(execlists);
execlists_unwind_incomplete_requests(execlists);
wait_for_guc_preempt_report(engine);
@@ -430,24 +430,27 @@ static inline void elsp_write(u64 desc, u32 __iomem *elsp)
static void execlists_submit_ports(struct intel_engine_cs *engine)
{
- struct execlist_port *port = engine->execlists.port;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
u32 __iomem *elsp =
engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
unsigned int n;
- for (n = execlists_num_ports(&engine->execlists); n--; ) {
+ for (n = execlists_num_ports(execlists); n--; ) {
+ struct execlist_port *port;
struct drm_i915_gem_request *rq;
unsigned int count;
u64 desc;
- rq = port_unpack(&port[n], &count);
+ port = execlists_port(execlists, n);
+ rq = port_unpack(port, &count);
if (rq) {
GEM_BUG_ON(count > !n);
if (!count++)
execlists_context_schedule_in(rq);
- port_set(&port[n], port_pack(rq, count));
+
+ port_set(port, port_pack(rq, count));
desc = execlists_update_context(rq);
- GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
+ GEM_DEBUG_EXEC(port->context_id = upper_32_bits(desc));
GEM_TRACE("%s in[%d]: ctx=%d.%d, seqno=%x\n",
engine->name, n,
@@ -519,10 +522,8 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
- const struct execlist_port * const last_port =
- &execlists->port[execlists->port_mask];
- struct drm_i915_gem_request *last = port_request(port);
+ struct execlist_port *port, *last_port;
+ struct drm_i915_gem_request *last;
struct rb_node *rb;
bool submit = false;
@@ -553,6 +554,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (!rb)
goto unlock;
+ port = execlists_port_head(execlists);
+ last = port_request(port);
+
if (last) {
/*
* Don't resubmit or switch until all outstanding
@@ -560,8 +564,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* know the next preemption status we see corresponds
* to this ELSP update.
*/
- GEM_BUG_ON(!port_count(&port[0]));
- if (port_count(&port[0]) > 1)
+ GEM_BUG_ON(!port_count(port));
+ if (port_count(port) > 1)
goto unlock;
/*
@@ -606,7 +610,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* the driver is unable to keep up the supply of new
* work).
*/
- if (port_count(&port[1]))
+ if (port_count(execlists_port_next(execlists, port)))
goto unlock;
/* WaIdleLiteRestore:bdw,skl
@@ -620,6 +624,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
}
}
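+ /* The last port we may fill during this dequeue pass */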
+ last_port = execlists_port_tail(execlists);
+
do {
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
struct drm_i915_gem_request *rq, *rn;
@@ -666,8 +672,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (submit)
port_assign(port, last);
- port++;
+ port = execlists_port_next(execlists, port);
GEM_BUG_ON(port_isset(port));
}
@@ -700,20 +706,21 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
{
- struct execlist_port *port = execlists->port;
unsigned int num_ports = execlists_num_ports(execlists);
+ struct execlist_port *port;
- while (num_ports-- && port_isset(port)) {
+ for (port = execlists_port_head(execlists);
+ num_ports-- && port_isset(port);
+ port = execlists_head_complete(execlists, port)) {
struct drm_i915_gem_request *rq = port_request(port);
GEM_BUG_ON(!execlists->active);
intel_engine_context_out(rq->engine);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_PREEMPTED);
i915_gem_request_put(rq);
-
- memset(port, 0, sizeof(*port));
- port++;
}
+
+ GEM_BUG_ON(port_isset(execlists_port_head(execlists)));
}
static void execlists_cancel_requests(struct intel_engine_cs *engine)
@@ -780,7 +787,6 @@ static void execlists_submission_tasklet(unsigned long data)
{
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port * const port = execlists->port;
struct drm_i915_private *dev_priv = engine->i915;
/* We can skip acquiring intel_runtime_pm_get() here as it was taken
@@ -799,6 +805,8 @@ static void execlists_submission_tasklet(unsigned long data)
* new request (outside of the context-switch interrupt).
*/
while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
+ struct execlist_port *port;
+
/* The HWSP contains a (cacheable) mirror of the CSB */
const u32 *buf =
&engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
@@ -839,6 +847,8 @@ static void execlists_submission_tasklet(unsigned long data)
head, GEN8_CSB_READ_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))),
tail, GEN8_CSB_WRITE_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))));
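+ /* Context-switch events retire ports in order, starting at the head */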
+ port = execlists_port_head(execlists);
+
while (head != tail) {
struct drm_i915_gem_request *rq;
unsigned int status;
@@ -914,14 +924,14 @@ static void execlists_submission_tasklet(unsigned long data)
GEM_BUG_ON(count == 0);
if (--count == 0) {
GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
- GEM_BUG_ON(port_isset(&port[1]) &&
+ GEM_BUG_ON(port_isset(execlists_port(execlists, 1)) &&
!(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
GEM_BUG_ON(!i915_gem_request_completed(rq));
execlists_context_schedule_out(rq);
trace_i915_gem_request_out(rq);
i915_gem_request_put(rq);
- execlists_port_complete(execlists, port);
+ port = execlists_head_complete(execlists, port);
} else {
port_set(port, port_pack(rq, count));
}
@@ -961,6 +971,7 @@ static void insert_request(struct intel_engine_cs *engine,
static void execlists_submit_request(struct drm_i915_gem_request *request)
{
struct intel_engine_cs *engine = request->engine;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
@@ -968,7 +979,7 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
insert_request(engine, &request->priotree, request->priotree.priority);
- GEM_BUG_ON(!engine->execlists.first);
+ GEM_BUG_ON(!execlists->first);
GEM_BUG_ON(list_empty(&request->priotree.link));
spin_unlock_irqrestore(&engine->timeline->lock, flags);
@@ -251,6 +251,11 @@ struct intel_engine_execlists {
unsigned int port_mask;
/**
+ * @port_head: index of the first (oldest) used execlist port
+ */
+ unsigned int port_head;
+
+ /**
* @queue: queue of requests, in priority lists
*/
struct rb_root queue;
@@ -643,8 +648,41 @@ execlists_num_ports(const struct intel_engine_execlists * const execlists)
return execlists->port_mask + 1;
}
-static inline void
-execlists_port_complete(struct intel_engine_execlists * const execlists,
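+/*
+ * The number of ports (port_mask + 1) is a power of two, so indices
+ * relative to port_head wrap with a simple mask, e.g. with 2 ports and
+ * port_head == 1, __port_add(1, 1, 1) == 0.
+ */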
+#define __port_add(start, n, mask) (((start) + (n)) & (mask))
+#define port_head_add(e, n) __port_add((e)->port_head, n, (e)->port_mask)
+
+/* Index relative to port_head, wrapping within the port array */
+static inline struct execlist_port *
+execlists_port(struct intel_engine_execlists * const execlists,
+ const unsigned int n)
+{
+ return &execlists->port[port_head_add(execlists, n)];
+}
+
+static inline struct execlist_port *
+execlists_port_head(struct intel_engine_execlists * const execlists)
+{
+ return execlists_port(execlists, 0);
+}
+
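+/*
+ * Passing -1 relies on unsigned wrap-around: (port_head - 1) & port_mask
+ * selects the last slot of the ring relative to the head.
+ */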
+static inline struct execlist_port *
+execlists_port_tail(struct intel_engine_execlists * const execlists)
+{
+ return execlists_port(execlists, -1);
+}
+
+static inline struct execlist_port *
+execlists_port_next(struct intel_engine_execlists * const execlists,
+ struct execlist_port *port)
+{
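+ /* Advance one slot, wrapping from the last array element back to [0] */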
+ if (port++ == execlists->port + execlists->port_mask)
+ port = execlists->port;
+
+ return port;
+}
+
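+/*
+ * Complete the request in the head port: the remaining ports are shuffled
+ * down one slot and the new head is returned.
+ */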
+static inline struct execlist_port *
+execlists_head_complete(struct intel_engine_execlists * const execlists,
struct execlist_port * const port)
{
const unsigned int m = execlists->port_mask;
@@ -654,6 +692,8 @@ execlists_port_complete(struct intel_engine_execlists * const execlists,
memmove(port, port + 1, m * sizeof(struct execlist_port));
memset(port + m, 0, sizeof(struct execlist_port));
+
+ return execlists_port_head(execlists);
}
static inline unsigned int