@@ -2755,6 +2755,8 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
((dev_priv)->info.has_logical_ring_contexts)
+#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
+ ((dev_priv)->info.has_logical_ring_elsq)
#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
((dev_priv)->info.has_logical_ring_preemption)
@@ -583,7 +583,8 @@ static const struct intel_device_info intel_cannonlake_info = {
GEN10_FEATURES, \
.gen = 11, \
.ddb_size = 2048, \
- .has_csr = 0
+ .has_csr = 0, \
+ .has_logical_ring_elsq = 1
static const struct intel_device_info intel_icelake_11_info = {
GEN11_FEATURES,
@@ -96,6 +96,7 @@ enum intel_platform {
func(has_l3_dpf); \
func(has_llc); \
func(has_logical_ring_contexts); \
+ func(has_logical_ring_elsq); \
func(has_logical_ring_preemption); \
func(has_overlay); \
func(has_pooled_eu); \
@@ -399,18 +399,30 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
return ce->lrc_desc;
}
-static inline void elsp_write(u64 desc, u32 __iomem *elsp)
+static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
{
- writel(upper_32_bits(desc), elsp);
- writel(lower_32_bits(desc), elsp);
+ if (execlists->ctrl_reg) {
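+ /*
+ * Gen11+: the submit queue is an array of descriptor dword pairs, one
+ * pair per port, so index by port and write the low dword first.
+ */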
+ writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
+ writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
+ } else {
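+ /*
+ * Pre-Gen11: the ELSP is a single submission port register; descriptors
+ * are written high dword first and the writes themselves queue the
+ * submission, so no explicit load from a control register is needed.
+ */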
+ writel(upper_32_bits(desc), execlists->submit_reg);
+ writel(lower_32_bits(desc), execlists->submit_reg);
+ }
}
static void execlists_submit_ports(struct intel_engine_cs *engine)
{
- struct execlist_port *port = engine->execlists.port;
+ struct intel_engine_execlists *execlists = &engine->execlists;
+ struct execlist_port *port = execlists->port;
unsigned int n;
- for (n = execlists_num_ports(&engine->execlists); n--; ) {
+ /*
+ * ELSQ note: the submit queue is not cleared after being submitted
+ * to the HW, so we need to make sure we always clean it up. This is
+ * currently ensured by the fact that we always write the same number
+ * of elsq entries; keep this in mind before changing the loop below.
+ */
+ for (n = execlists_num_ports(execlists); n--; ) {
struct drm_i915_gem_request *rq;
unsigned int count;
u64 desc;
@@ -433,9 +445,14 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
desc = 0;
}
- elsp_write(desc, engine->execlists.elsp);
+ write_desc(execlists, desc, n);
}
- execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);
+
+ /* we need to manually load the submit queue */
+ if (execlists->ctrl_reg)
+ writel(EL_CTRL_LOAD, execlists->ctrl_reg);
+
+ execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
}
static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
@@ -469,11 +486,12 @@ static void port_assign(struct execlist_port *port,
static void inject_preempt_context(struct intel_engine_cs *engine)
{
+ struct intel_engine_execlists *execlists = &engine->execlists;
struct intel_context *ce =
&engine->i915->preempt_context->engine[engine->id];
unsigned int n;
- GEM_BUG_ON(engine->execlists.preempt_complete_status !=
+ GEM_BUG_ON(execlists->preempt_complete_status !=
upper_32_bits(ce->lrc_desc));
GEM_BUG_ON(!IS_ALIGNED(ce->ring->size, WA_TAIL_BYTES));
@@ -489,11 +507,16 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT));
GEM_TRACE("%s\n", engine->name);
- for (n = execlists_num_ports(&engine->execlists); --n; )
- elsp_write(0, engine->execlists.elsp);
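+ /*
+ * Fill every port above 0 with an empty descriptor and put the preempt
+ * context in port 0; on Gen11+ the queue then still needs to be loaded
+ * via the control register below.
+ */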
+ for (n = execlists_num_ports(execlists); --n; )
+ write_desc(execlists, 0, n);
+
+ write_desc(execlists, ce->lrc_desc, n);
+
+ /* we need to manually load the submit queue */
+ if (execlists->ctrl_reg)
+ writel(EL_CTRL_LOAD, execlists->ctrl_reg);
- elsp_write(ce->lrc_desc, engine->execlists.elsp);
- execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);
+ execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
}
static void execlists_dequeue(struct intel_engine_cs *engine)
@@ -2068,8 +2091,15 @@ static int logical_ring_init(struct intel_engine_cs *engine)
if (ret)
goto error;
- engine->execlists.elsp =
- engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
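+ /*
+ * Gen11+ submits through the ExecList Submission Queue contents
+ * registers and loads the queue via the control register; earlier
+ * gens write the ELSP register directly.
+ */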
+ if (HAS_LOGICAL_RING_ELSQ(engine->i915)) {
+ engine->execlists.submit_reg = engine->i915->regs +
+ i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
+ engine->execlists.ctrl_reg = engine->i915->regs +
+ i915_mmio_reg_offset(RING_EXECLIST_CONTROL(engine));
+ } else {
+ engine->execlists.submit_reg = engine->i915->regs +
+ i915_mmio_reg_offset(RING_ELSP(engine));
+ }
engine->execlists.preempt_complete_status = ~0u;
if (engine->i915->preempt_context)
@@ -2338,7 +2368,7 @@ populate_lr_context(struct i915_gem_context *ctx,
if (!engine->default_state)
regs[CTX_CONTEXT_CONTROL + 1] |=
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
- if (ctx == ctx->i915->preempt_context)
+ if (ctx == ctx->i915->preempt_context && INTEL_GEN(engine->i915) < 11)
regs[CTX_CONTEXT_CONTROL + 1] |=
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
@@ -42,6 +42,9 @@
#define RING_CONTEXT_STATUS_BUF_LO(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8)
#define RING_CONTEXT_STATUS_BUF_HI(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8 + 4)
#define RING_CONTEXT_STATUS_PTR(engine) _MMIO((engine)->mmio_base + 0x3a0)
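+/*
+ * Gen11+ enhanced execlists: descriptors are written into the submission
+ * queue (SQ) contents registers and loaded onto the HW by setting
+ * EL_CTRL_LOAD in the execlist control register.
+ */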
+#define RING_EXECLIST_SQ_CONTENTS(engine) _MMIO((engine)->mmio_base + 0x510)
+#define RING_EXECLIST_CONTROL(engine) _MMIO((engine)->mmio_base + 0x550)
+#define EL_CTRL_LOAD (1 << 0)
/* The docs specify that the write pointer wraps around after 5h, "After status
* is written out to the last available status QW at offset 5h, this pointer
@@ -200,9 +200,17 @@ struct intel_engine_execlists {
bool no_priolist;
/**
- * @elsp: the ExecList Submission Port register
+ * @submit_reg: gen-specific execlist submission register, set to the
+ * ExecList Submission Port (elsp) register pre-Gen11 and to the
+ * ExecList Submission Queue Contents register array for Gen11+
*/
- u32 __iomem *elsp;
+ u32 __iomem *submit_reg;
+
+ /**
+ * @ctrl_reg: the enhanced execlists control register, used to load the
+ * submit queue on the HW and to request preemptions to idle
+ */
+ u32 __iomem *ctrl_reg;
/**
* @port: execlist port states