@@ -3313,6 +3313,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
if (i915.enable_execlists) {
const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
+ struct intel_engine_execlist * const el = &engine->execlist;
u32 ptr, read, write;
unsigned int idx;
@@ -3346,11 +3347,10 @@ static int i915_engine_info(struct seq_file *m, void *unused)
}
rcu_read_lock();
- for (idx = 0; idx < ARRAY_SIZE(engine->execlist.port); idx++) {
+ for (idx = 0; idx < execlist_num_ports(el); idx++) {
unsigned int count;
- rq = port_unpack(&engine->execlist.port[idx],
- &count);
+ rq = port_unpack(&el->port[idx], &count);
if (rq) {
seq_printf(m, "\t\tELSP[%d] count=%d, ",
idx, count);
@@ -3363,7 +3363,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
rcu_read_unlock();
spin_lock_irq(&engine->timeline->lock);
- for (rb = engine->execlist.first; rb; rb = rb_next(rb)) {
+ for (rb = el->first; rb; rb = rb_next(rb)) {
struct i915_priolist *p =
rb_entry(rb, typeof(*p), node);
@@ -1000,7 +1000,8 @@ struct i915_gpu_state {
u32 seqno;
u32 head;
u32 tail;
- } *requests, execlist[2];
+ } *requests, execlist[EXECLIST_MAX_PORTS];
+ unsigned int num_ports;
struct drm_i915_error_waiter {
char comm[TASK_COMM_LEN];
@@ -396,6 +396,8 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
static void error_print_engine(struct drm_i915_error_state_buf *m,
const struct drm_i915_error_engine *ee)
{
+ int n;
+
err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
err_printf(m, " START: 0x%08x\n", ee->start);
err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
@@ -465,8 +467,11 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
jiffies_to_msecs(jiffies - ee->hangcheck_timestamp));
err_printf(m, " engine reset count: %u\n", ee->reset_count);
- error_print_request(m, " ELSP[0]: ", &ee->execlist[0]);
- error_print_request(m, " ELSP[1]: ", &ee->execlist[1]);
+ for (n = 0; n < ee->num_ports; n++) {
+ err_printf(m, " ELSP[%d]:", n);
+ error_print_request(m, " ", &ee->execlist[n]);
+ }
+
error_print_context(m, " Active context: ", &ee->context);
}
@@ -1327,17 +1332,19 @@ static void engine_record_requests(struct intel_engine_cs *engine,
static void error_record_engine_execlists(struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
{
- const struct execlist_port *port = engine->execlist.port;
+ const struct intel_engine_execlist * const el = &engine->execlist;
unsigned int n;
- for (n = 0; n < ARRAY_SIZE(engine->execlist.port); n++) {
- struct drm_i915_gem_request *rq = port_request(&port[n]);
+ for (n = 0; n < execlist_num_ports(el); n++) {
+ struct drm_i915_gem_request *rq = port_request(&el->port[n]);
if (!rq)
break;
record_request(rq, &ee->execlist[n]);
}
+
+ ee->num_ports = n;
}
static void record_context(struct drm_i915_error_context *e,
@@ -562,6 +562,8 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
struct intel_engine_execlist * const el = &engine->execlist;
struct execlist_port *port = el->port;
struct drm_i915_gem_request *last = NULL;
+ const struct execlist_port * const last_port =
+ &el->port[el->port_mask];
bool submit = false;
struct rb_node *rb;
@@ -577,7 +579,7 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
if (last && rq->ctx != last->ctx) {
- if (port != el->port) {
+ if (port == last_port) {
__list_del_many(&p->requests,
&rq->priotree.link);
goto done;
@@ -617,6 +619,8 @@ static void i915_guc_irq_handler(unsigned long data)
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlist * const el = &engine->execlist;
struct execlist_port *port = el->port;
+ const struct execlist_port * const last_port =
+ &el->port[el->port_mask];
struct drm_i915_gem_request *rq;
rq = port_request(&port[0]);
@@ -629,7 +633,7 @@ static void i915_guc_irq_handler(unsigned long data)
rq = port_request(&port[0]);
}
- if (!port_isset(&port[1]))
+ if (!port_isset(last_port))
i915_guc_dequeue(engine);
}
@@ -403,6 +403,10 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
el->csb_use_mmio = csb_force_mmio(engine->i915);
+ engine->execlist.port_mask = 1;
+ BUILD_BUG_ON_NOT_POWER_OF_2(execlist_num_ports(&engine->execlist));
+ GEM_BUG_ON(execlist_num_ports(&engine->execlist) > EXECLIST_MAX_PORTS);
+
el->queue = RB_ROOT;
el->first = NULL;
}
@@ -399,7 +399,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
unsigned int n;
- for (n = ARRAY_SIZE(engine->execlist.port); n--; ) {
+ for (n = execlist_num_ports(&engine->execlist); n--; ) {
struct drm_i915_gem_request *rq;
unsigned int count;
u64 desc;
@@ -456,6 +456,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
struct drm_i915_gem_request *last;
struct intel_engine_execlist * const el = &engine->execlist;
struct execlist_port *port = el->port;
+ const struct execlist_port * const last_port =
+ &el->port[el->port_mask];
struct rb_node *rb;
bool submit = false;
@@ -515,7 +517,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* combine this request with the last, then we
* are done.
*/
- if (port != el->port) {
+ if (port == last_port) {
__list_del_many(&p->requests,
&rq->priotree.link);
goto done;
@@ -234,7 +234,14 @@ struct intel_engine_execlist {
* @context_id: context ID for port
*/
GEM_DEBUG_DECL(u32 context_id);
- } port[2];
+
+#define EXECLIST_MAX_PORTS 2
+ } port[EXECLIST_MAX_PORTS];
+
+ /**
+ * @port_mask: number of execlist ports - 1
+ */
+ unsigned int port_mask;
/**
* @queue: queue of requests, in priority lists
@@ -511,16 +518,22 @@ struct intel_engine_cs {
u32 (*get_cmd_length_mask)(u32 cmd_header);
};
+static inline unsigned int
+execlist_num_ports(const struct intel_engine_execlist * const el)
+{
+ return el->port_mask + 1;
+}
+
static inline void
execlist_port_complete(struct intel_engine_execlist * const el,
struct execlist_port * const port)
{
- struct execlist_port * const port1 = &el->port[1];
+ const unsigned int m = el->port_mask;
GEM_BUG_ON(port_index(port, el) != 0);
- *port = *port1;
- memset(port1, 0, sizeof(struct execlist_port));
+ memmove(port, port + 1, m * sizeof(struct execlist_port));
+ memset(port + m, 0, sizeof(struct execlist_port));
}
static inline unsigned int
As we emulate execlists on top of the GuC workqueue, it is not restricted to just 2 ports and we can increase that number arbitrarily to trade off queue depth (i.e. scheduling latency) against pipeline bubbles. v2: rebase; better commit msg (Chris) Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com> --- drivers/gpu/drm/i915/i915_debugfs.c | 8 ++++---- drivers/gpu/drm/i915/i915_drv.h | 3 ++- drivers/gpu/drm/i915/i915_gpu_error.c | 17 ++++++++++++----- drivers/gpu/drm/i915/i915_guc_submission.c | 8 ++++++-- drivers/gpu/drm/i915/intel_engine_cs.c | 4 ++++ drivers/gpu/drm/i915/intel_lrc.c | 6 ++++-- drivers/gpu/drm/i915/intel_ringbuffer.h | 21 +++++++++++++++++---- 7 files changed, 49 insertions(+), 18 deletions(-)