@@ -183,6 +183,10 @@ struct i915_gem_context {
u32 *lrc_reg_state;
u64 lrc_desc;
int pin_count;
+ /**
+  * watchdog_threshold: hw watchdog threshold value, in clock counts;
+  * zero means no watchdog is armed for this context
+  */
+ u32 watchdog_threshold;
/**
* active_tracker: Active tracker for the external rq activity
@@ -324,6 +324,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
if (engine->context_size)
DRIVER_CAPS(dev_priv)->has_logical_contexts = true;
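+ /*
+  * Cache the engine-specific value used to disable the hw watchdog;
+  * it depends on engine id and gen (see get_watchdog_disable()).
+  */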
+ engine->watchdog_disable_id = get_watchdog_disable(engine);
+
/* Nothing to do here, execute in order of dependencies */
engine->schedule = NULL;
@@ -2193,16 +2193,75 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
atomic_read(&execlists->tasklet.count));
}
+static u32 *gen8_emit_start_watchdog(struct i915_request *rq, u32 *cs)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ struct i915_gem_context *ctx = rq->gem_context;
+ struct intel_context *ce = to_intel_context(ctx, engine);
+
+ GEM_BUG_ON(!intel_engine_supports_watchdog(engine));
+
+ /*
+ * The watchdog register must never be programmed to zero. A zero
+ * threshold would cause the watchdog counter to exceed it and
+ * prevent the engine from going into the IDLE state.
+ */
+ GEM_BUG_ON(ce->watchdog_threshold == 0);
+
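+ /* Program threshold and control registers with one MI_LOAD_REGISTER_IMM(2) */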
+ /* Set counter period */
+ *cs++ = MI_LOAD_REGISTER_IMM(2);
+ *cs++ = i915_mmio_reg_offset(RING_THRESH(engine->mmio_base));
+ *cs++ = ce->watchdog_threshold;
+ /* Start counter */
+ *cs++ = i915_mmio_reg_offset(RING_CNTR(engine->mmio_base));
+ *cs++ = GEN8_WATCHDOG_ENABLE;
+
+ return cs;
+}
+
+static u32 *gen8_emit_stop_watchdog(struct i915_request *rq, u32 *cs)
+{
+ struct intel_engine_cs *engine = rq->engine;
+
+ GEM_BUG_ON(!intel_engine_supports_watchdog(engine));
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(RING_CNTR(engine->mmio_base));
+ *cs++ = engine->watchdog_disable_id;
+
+ return cs;
+}
+
static int gen8_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
const unsigned int flags)
{
+ struct intel_engine_cs *engine = rq->engine;
u32 *cs;
+ u32 num_dwords;
+ bool enable_watchdog = false;
- cs = intel_ring_begin(rq, 6);
+ /* bb_start only */
+ num_dwords = 6;
+
+ /* check if watchdog will be required */
+ if (to_intel_context(rq->gem_context, engine)->watchdog_threshold != 0) {
+ /* + start_watchdog (5) + stop_watchdog (3) */
+ num_dwords += 8;
+ enable_watchdog = true;
+ }
+
+ cs = intel_ring_begin(rq, num_dwords);
if (IS_ERR(cs))
return PTR_ERR(cs);
+ if (enable_watchdog) {
+ /* Start watchdog timer */
+ cs = gen8_emit_start_watchdog(rq, cs);
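+ /* Note the engine's current hangcheck seqno as the watchdog is armed */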
+ engine->current_seqno = intel_engine_get_hangcheck_seqno(engine);
+ }
+
/*
* WaDisableCtxRestoreArbitration:bdw,chv
*
@@ -2229,10 +2288,16 @@ static int gen8_emit_bb_start(struct i915_request *rq,
*cs++ = upper_32_bits(offset);
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
+ if (enable_watchdog) {
+ /* Cancel watchdog timer */
+ cs = gen8_emit_stop_watchdog(rq, cs);
+ }
+
+ /* Pad with a NOOP to the even dword count reserved above */
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
return 0;
}
@@ -2353,7 +2418,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
}
/* From GEN9 onwards, all engines use the same RING_CNTR format */
-static inline u32 get_watchdog_disable(struct intel_engine_cs *engine)
+u32 get_watchdog_disable(struct intel_engine_cs *engine)
{
if (engine->id == RCS || INTEL_GEN(engine->i915) >= 9)
return GEN8_WATCHDOG_DISABLE;
@@ -2532,6 +2597,9 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
I915_SCHEDULER_CAP_PRIORITY;
if (intel_engine_has_preemption(engine))
engine->i915->caps.scheduler |= I915_SCHEDULER_CAP_PREEMPTION;
+
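+ /* BCS has no watchdog-expired irq, so it cannot support the watchdog */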
+ if (engine->id != BCS)
+ engine->flags |= I915_ENGINE_SUPPORTS_WATCHDOG;
}
static void
@@ -2710,6 +2778,9 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine)
if (err)
return err;
+ /* BCS engine does not have a watchdog-expired irq and must not advertise support */
+ GEM_BUG_ON(engine->id == BCS && intel_engine_supports_watchdog(engine));
+
return logical_ring_init(engine);
}
@@ -120,4 +120,6 @@ void intel_virtual_engine_put(struct intel_engine_cs *engine);
u32 gen8_make_rpcs(struct drm_i915_private *i915, struct intel_sseu *ctx_sseu);
+u32 get_watchdog_disable(struct intel_engine_cs *engine);
+
#endif /* _INTEL_LRC_H_ */
@@ -360,6 +360,7 @@ struct intel_engine_cs {
unsigned int guc_id;
unsigned long mask;
u32 current_seqno;
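+ /* engine-specific value written to RING_CNTR to disable the hw watchdog */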
+ u32 watchdog_disable_id;
u8 uabi_class;
@@ -520,10 +522,12 @@ struct intel_engine_cs {
struct intel_engine_hangcheck hangcheck;
-#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
-#define I915_ENGINE_SUPPORTS_STATS BIT(1)
-#define I915_ENGINE_HAS_PREEMPTION BIT(2)
-#define I915_ENGINE_IS_VIRTUAL BIT(3)
+#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
+#define I915_ENGINE_SUPPORTS_STATS BIT(1)
+#define I915_ENGINE_HAS_PREEMPTION BIT(2)
+#define I915_ENGINE_IS_VIRTUAL BIT(3)
+#define I915_ENGINE_SUPPORTS_WATCHDOG BIT(4)
+
unsigned int flags;
/*
@@ -612,6 +616,12 @@ intel_engine_is_virtual(const struct intel_engine_cs *engine)
return engine->flags & I915_ENGINE_IS_VIRTUAL;
}
+static inline bool
+intel_engine_supports_watchdog(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_SUPPORTS_WATCHDOG;
+}
+
static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
unsigned int bit)