@@ -1970,6 +1970,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
static void i915_dump_lrc_obj(struct seq_file *m,
struct intel_engine_cs *ring,
+ struct intel_context *ctx,
struct drm_i915_gem_object *ctx_obj)
{
struct page *page;
@@ -1984,7 +1985,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
}
seq_printf(m, "CONTEXT: %s %u\n", ring->name,
- intel_execlists_ctx_id(ctx_obj));
+ intel_execlists_ctx_id(ctx));
if (!i915_gem_obj_ggtt_bound(ctx_obj))
seq_puts(m, "\tNot bound in GGTT\n");
@@ -2033,7 +2034,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
list_for_each_entry(ctx, &dev_priv->context_list, link) {
for_each_ring(ring, dev_priv, i) {
if (ring->default_context != ctx)
- i915_dump_lrc_obj(m, ring,
+ i915_dump_lrc_obj(m, ring, ctx,
ctx->engine[i].state);
}
}
@@ -2112,7 +2113,7 @@ static int i915_execlists(struct seq_file *m, void *data)
ctx_obj = head_req->ctx->engine[ring_id].state;
seq_printf(m, "\tHead request id: %u\n",
- intel_execlists_ctx_id(ctx_obj));
+ intel_execlists_ctx_id(head_req->ctx));
seq_printf(m, "\tHead request tail: %u\n",
head_req->tail);
}
@@ -260,7 +260,7 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
/**
* intel_execlists_ctx_id() - get the Execlists Context ID
- * @ctx_obj: Logical Ring Context backing object.
+ * @ctx: Logical ring context
*
* Do not confuse with ctx->id! Unfortunately we have a name overload
* here: the old context ID we pass to userspace as a handle so that
@@ -269,15 +269,15 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
* interrupts.
*
* Return: 20-bit globally unique context ID.
+ *
+ * Further, the ID given to the HW can now be relied on to remain
+ * constant for the lifetime of the context, unlike before, when it
+ * was derived from the logical ring context's GGTT address (whose
+ * backing object could be repinned at a different address).
*/
-u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
+u32 intel_execlists_ctx_id(struct intel_context *ctx)
{
- u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
- LRC_PPHWSP_PN * PAGE_SIZE;
-
- /* LRCA is required to be 4K aligned so the more significant 20 bits
- * are globally unique */
- return lrca >> 12;
+ return ctx->global_id;
}
static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
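These hunks rely on ctx->global_id but do not include its allocation site. As a minimal sketch (assign_global_id() and a context_ida field in dev_priv are assumptions about code outside this excerpt), the ID could be handed out once at context creation with the kernel's IDA allocator, constrained to the hardware's 20-bit field:

/*
 * Minimal sketch, not part of this patch: assign_global_id() and the
 * context_ida field are assumed, not shown in these hunks.  Requires
 * <linux/idr.h>.
 */
static int assign_global_id(struct drm_i915_private *dev_priv,
			    struct intel_context *ctx)
{
	/* The HW field is 20 bits wide: valid IDs are 0..(1 << 20) - 1 */
	int id = ida_simple_get(&dev_priv->context_ida, 0,
				1 << 20, GFP_KERNEL);

	if (id < 0)
		return id;

	ctx->global_id = id;
	return 0;
}

A matching ida_simple_remove() in the context-free path would return the ID to the pool.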
@@ -305,7 +305,7 @@ uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
desc |= GEN8_CTX_L3LLC_COHERENT;
desc |= GEN8_CTX_PRIVILEGE;
desc |= lrca;
- desc |= (u64)intel_execlists_ctx_id(ctx_obj) << GEN8_CTX_ID_SHIFT;
+ desc |= (u64)intel_execlists_ctx_id(ctx) << GEN8_CTX_ID_SHIFT;
/* TODO: WaDisableLiteRestore when we start using semaphore
* signalling between Command Streamers */
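The descriptor built above is where this ID reaches the hardware: GEN8_CTX_ID_SHIFT places it in the upper dword, which the GPU later echoes back through the Context Status Buffer. Roughly (taking GEN8_CTX_ID_SHIFT as 32, per intel_lrc.h, and the flag bits as an approximation):

/*
 * gen8 context descriptor as assembled in intel_lr_context_descriptor():
 *
 *   bits  0-11: flags (GEN8_CTX_VALID, addressing mode,
 *               GEN8_CTX_L3LLC_COHERENT, GEN8_CTX_PRIVILEGE, ...)
 *   bits 12-31: LRCA, the 4K-aligned logical ring context address
 *   bits 32-51: the 20-bit ID from intel_execlists_ctx_id()
 */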
@@ -475,9 +475,7 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
execlist_link);
if (head_req != NULL) {
- struct drm_i915_gem_object *ctx_obj =
- head_req->ctx->engine[ring->id].state;
- if (intel_execlists_ctx_id(ctx_obj) == request_id) {
+ if (intel_execlists_ctx_id(head_req->ctx) == request_id) {
WARN(head_req->elsp_submitted == 0,
"Never submitted head request\n");
@@ -93,7 +93,7 @@ struct i915_execbuffer_params;
int intel_execlists_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas);
-u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
+u32 intel_execlists_ctx_id(struct intel_context *ctx);
void intel_lrc_irq_handler(struct intel_engine_cs *ring);
void intel_execlists_retire_requests(struct intel_engine_cs *ring);