@@ -1022,6 +1022,82 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
i915_gem_context_unreference(ctx);
}
+/**
+ * intel_execlist_get_current_request() - return the request currently being
+ * processed by the given engine
+ *
+ * @engine: Engine whose currently executing request is to be returned.
+ *
+ * Returns:
+ * req - if a valid request is found in the execlist queue and the HW agrees
+ * that it is being executed. A reference is taken and the caller has to
+ * unreference it at the end of its lifecycle.
+ * NULL - otherwise
+ */
+static struct drm_i915_gem_request *
+intel_execlist_get_current_request(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_request *req;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->execlist_lock, flags);
+
+ req = list_first_entry_or_null(&engine->execlist_queue,
+ struct drm_i915_gem_request,
+ execlist_link);
+ /*
+ * Only acknowledge the request in the execlist queue if it's actually
+ * been submitted to hardware, otherwise there's the risk of
+ * inconsistency between the (unsubmitted) request and the idle
+ * hardware state.
+ */
+ if (req && req->ctx && req->elsp_submitted) {
+ u32 execlist_status;
+ u32 hw_context;
+ u32 hw_active;
+ struct drm_i915_private *dev_priv = engine->i915;
+
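+ /*
+ * The upper dword of EXECLIST_STATUS carries the ID of the context
+ * currently being executed, while bits 14 and 15 of the lower dword
+ * report whether execlist element 0 or 1 is active.
+ */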
+ hw_context = I915_READ(RING_EXECLIST_STATUS_CTX_ID(engine));
+ execlist_status = I915_READ(RING_EXECLIST_STATUS_LO(engine));
+ hw_active = ((execlist_status & EXECLIST_STATUS_ELEMENT0_ACTIVE) ||
+ (execlist_status & EXECLIST_STATUS_ELEMENT1_ACTIVE));
+
+ /* If both HW and driver agree then we found it */
+ if (hw_active && hw_context == req->ctx->hw_id)
+ i915_gem_request_reference(req);
+ else
+ req = NULL;
+ } else {
+ req = NULL;
+ WARN(1, "No active request for %s\n", engine->name);
+ }
+
+ spin_unlock_irqrestore(&engine->execlist_lock, flags);
+
+ return req;
+}
+
+/**
+ * gen8_engine_state_save() - save minimum engine state
+ * @engine: engine whose state is to be saved
+ * @state: location where the state is saved
+ *
+ * The captured engine state includes head, tail and the active request.
+ * After a reset, the engine is restarted with this state.
+ *
+ * Returns:
+ * 0 if ok, otherwise propagates error codes.
+ */
+static int gen8_engine_state_save(struct intel_engine_cs *engine,
+ struct intel_engine_cs_state *state)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+
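+ /*
+ * Record the current ring head and take a reference on the request
+ * the engine is executing; this is the state used to restart the
+ * engine after the reset.
+ */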
+ state->head = I915_READ_HEAD(engine);
+ state->req = intel_execlist_get_current_request(engine);
+ if (!state->req)
+ return -EINVAL;
+
+ return 0;
+}
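+
+/*
+ * Illustrative sketch only (not part of this patch): a per-engine reset
+ * path would be expected to use the new hook roughly as follows, treating
+ * a failure to save the minimum state as a reason to fall back to a full
+ * GPU reset. The local variable names are assumptions made for the sketch.
+ *
+ *	struct intel_engine_cs_state state;
+ *	int ret;
+ *
+ *	ret = engine->save(engine, &state);
+ *	if (ret)
+ *		return ret;
+ */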
+
static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
int ret, i;
@@ -1977,6 +2053,10 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->emit_bb_start = gen8_emit_bb_start;
engine->get_seqno = gen8_get_seqno;
engine->set_seqno = gen8_set_seqno;
+
+ /* engine reset supporting functions */
+ engine->save = gen8_engine_state_save;
+
if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
engine->irq_seqno_barrier = bxt_a_seqno_barrier;
engine->set_seqno = bxt_a_set_seqno;
@@ -31,7 +31,10 @@
/* Execlists regs */
#define RING_ELSP(ring) _MMIO((ring)->mmio_base + 0x230)
#define RING_EXECLIST_STATUS_LO(ring) _MMIO((ring)->mmio_base + 0x234)
+#define EXECLIST_STATUS_ELEMENT0_ACTIVE (1 << 14)
+#define EXECLIST_STATUS_ELEMENT1_ACTIVE (1 << 15)
#define RING_EXECLIST_STATUS_HI(ring) _MMIO((ring)->mmio_base + 0x234 + 4)
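+/* Upper dword of EXECLIST_STATUS: ID of the context being executed */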
+#define RING_EXECLIST_STATUS_CTX_ID(ring) RING_EXECLIST_STATUS_HI(ring)
#define RING_CONTEXT_CONTROL(ring) _MMIO((ring)->mmio_base + 0x244)
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
@@ -141,6 +141,12 @@ struct i915_ctx_workarounds {
struct drm_i915_gem_object *obj;
};
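+/*
+ * Minimum engine state captured before an engine reset; the engine is
+ * restarted with this state afterwards.
+ */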
+struct intel_engine_cs_state {
+ u32 head;
+ u32 tail;
+ struct drm_i915_gem_request *req;
+};
+
struct intel_engine_cs {
struct drm_i915_private *i915;
const char *name;
@@ -204,6 +210,10 @@ struct intel_engine_cs {
#define I915_DISPATCH_RS 0x4
void (*cleanup)(struct intel_engine_cs *ring);
+ /* engine reset supporting functions */
+ int (*save)(struct intel_engine_cs *engine,
+ struct intel_engine_cs_state *state);
+
/* GEN8 signal/wait table - never trust comments!
* signal to signal to signal to signal to signal to
* RCS VCS BCS VECS VCS2