@@ -259,6 +259,7 @@
#define MI_FORCE_RESTORE (1<<1)
#define MI_RESTORE_INHIBIT (1<<0)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
+#define MI_STORE_DWORD_IMM_GEN8 MI_INSTR(0x20, 2)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
#define MI_STORE_DWORD_INDEX_SHIFT 2
@@ -781,6 +781,66 @@ gen6_add_request(struct intel_engine *ring,
return 0;
}
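+
+/*
+ * Request completion for non-render rings in LRC (Execlists) mode:
+ * MI_FLUSH_DW flushes the ring, invalidates TLBs, and post-sync writes
+ * the new seqno into the default context's hardware status page;
+ * MI_USER_INTERRUPT then notifies the driver.
+ */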
+static int
+gen8_nonrender_add_request_lrc(struct intel_engine *ring,
+ struct i915_hw_context *ctx)
+{
+ struct intel_ringbuffer *ringbuf;
+ struct i915_hw_context *dctx = ring->default_context;
+ struct drm_i915_gem_object *obj = dctx->engine[ring->id].obj;
+ u32 cmd;
+
+ ringbuf = intel_ringbuffer_begin(ring, ctx, 6);
+ /* PTR_ERR(NULL) would be 0 (success); map a NULL ringbuf to -ENOMEM */
+ if (IS_ERR_OR_NULL(ringbuf))
+ return ringbuf ? PTR_ERR(ringbuf) : -ENOMEM;
+
+ /* gen8 MI_FLUSH_DW carries an extra upper-address dword: length +1 */
+ cmd = MI_FLUSH_DW + 1;
+ cmd |= MI_INVALIDATE_TLB;
+ cmd |= MI_FLUSH_DW_OP_STOREDW;
+
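+ /* post-sync write of the new seqno into the default context's HWS page */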
+ intel_ringbuffer_emit(ringbuf, cmd);
+ intel_ringbuffer_emit(ringbuf,
+ (i915_gem_obj_ggtt_offset(obj) +
+ (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)) |
+ MI_FLUSH_DW_USE_GTT);
+ intel_ringbuffer_emit(ringbuf, 0); /* upper addr */
+ intel_ringbuffer_emit(ringbuf, ring->outstanding_lazy_seqno);
+ intel_ringbuffer_emit(ringbuf, MI_USER_INTERRUPT);
+ intel_ringbuffer_emit(ringbuf, MI_NOOP);
+ intel_ringbuffer_advance_and_submit(ring, ctx);
+
+ return 0;
+}
+
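+/*
+ * Request completion for the render ring in LRC mode: a global-GTT
+ * MI_STORE_DWORD_IMM writes the new seqno into the default context's
+ * hardware status page, followed by MI_USER_INTERRUPT.
+ */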
+static int
+gen8_add_request_lrc(struct intel_engine *ring,
+ struct i915_hw_context *ctx)
+{
+ struct intel_ringbuffer *ringbuf;
+ struct i915_hw_context *dctx = ring->default_context;
+ struct drm_i915_gem_object *obj = dctx->engine[ring->id].obj;
+ u32 cmd;
+
+ ringbuf = intel_ringbuffer_begin(ring, ctx, 6);
+ /* PTR_ERR(NULL) would be 0 (success); map a NULL ringbuf to -ENOMEM */
+ if (IS_ERR_OR_NULL(ringbuf))
+ return ringbuf ? PTR_ERR(ringbuf) : -ENOMEM;
+
+ cmd = MI_STORE_DWORD_IMM_GEN8;
+ cmd |= (1 << 22); /* bit 22: store via the global GTT, not ppgtt */
+
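+ /* store the new seqno into the default context's HWS page */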
+ intel_ringbuffer_emit(ringbuf, cmd);
+ intel_ringbuffer_emit(ringbuf,
+ i915_gem_obj_ggtt_offset(obj) +
+ (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT));
+ intel_ringbuffer_emit(ringbuf, 0); /* upper addr */
+ intel_ringbuffer_emit(ringbuf, ring->outstanding_lazy_seqno);
+ intel_ringbuffer_emit(ringbuf, MI_USER_INTERRUPT);
+ intel_ringbuffer_emit(ringbuf, MI_NOOP);
+ intel_ringbuffer_advance_and_submit(ring, ctx);
+
+ return 0;
+}
+
static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
u32 seqno)
{
@@ -2047,6 +2107,7 @@ int intel_init_render_ring(struct drm_device *dev)
if (dev_priv->lrc_enabled) {
ring->submit = gen8_submit_ctx;
ring->init = init_render_ring_lrc;
+ ring->add_request = gen8_add_request_lrc;
}
ring->flush = gen8_render_ring_flush;
ring->irq_get = gen8_ring_get_irq;
@@ -2224,6 +2285,7 @@ int intel_init_bsd_ring(struct drm_device *dev)
if (dev_priv->lrc_enabled) {
ring->submit = gen8_submit_ctx;
ring->init = init_ring_common_lrc;
+ ring->add_request = gen8_nonrender_add_request_lrc;
}
ring->flush = gen8_ring_flush;
ring->irq_enable_mask =
@@ -2294,13 +2356,14 @@ int intel_init_bsd2_ring(struct drm_device *dev)
if (dev_priv->lrc_enabled) {
ring->submit = gen8_submit_ctx;
+ ring->add_request = gen8_nonrender_add_request_lrc;
ring->init = init_ring_common_lrc;
} else {
ring->submit = ring_write_tail;
+ ring->add_request = gen6_add_request;
ring->init = init_ring_common;
}
ring->flush = gen8_ring_flush;
- ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
ring->irq_enable_mask =
@@ -2344,6 +2407,7 @@ int intel_init_blt_ring(struct drm_device *dev)
if (dev_priv->lrc_enabled) {
ring->submit = gen8_submit_ctx;
ring->init = init_ring_common_lrc;
+ ring->add_request = gen8_nonrender_add_request_lrc;
}
ring->flush = gen8_ring_flush;
ring->irq_enable_mask =
@@ -2395,6 +2459,7 @@ int intel_init_vebox_ring(struct drm_device *dev)
if (dev_priv->lrc_enabled) {
ring->submit = gen8_submit_ctx;
ring->init = init_ring_common_lrc;
+ ring->add_request = gen8_nonrender_add_request_lrc;
}
ring->flush = gen8_ring_flush;
ring->irq_enable_mask =