@@ -1801,6 +1801,30 @@ static void gen6_bsd_ring_write_tail(struct intel_engine *ring,
 		      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
 }
 
+static int gen8_ring_flush(struct intel_engine *ring,
+			   u32 invalidate, u32 flush)
+{
+	uint32_t cmd;
+	int ret;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	cmd = MI_FLUSH_DW + 1;
+
+	if (invalidate & I915_GEM_GPU_DOMAINS)
+		cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
+			MI_FLUSH_DW_OP_STOREDW;
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	intel_ring_emit(ring, 0); /* upper addr */
+	intel_ring_emit(ring, 0); /* value */
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
 static int gen6_bsd_ring_flush(struct intel_engine *ring,
 			       u32 invalidate, u32 flush)
 {
@@ -1812,8 +1836,7 @@ static int gen6_bsd_ring_flush(struct intel_engine *ring,
 		return ret;
 
 	cmd = MI_FLUSH_DW;
-	if (INTEL_INFO(ring->dev)->gen >= 8)
-		cmd += 1;
+
 	/*
 	 * Bspec vol 1c.5 - video engine command streamer:
 	 * "If ENABLED, all TLBs will be invalidated once the flush
@@ -1825,13 +1848,9 @@ static int gen6_bsd_ring_flush(struct intel_engine *ring,
 			MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
 	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-	if (INTEL_INFO(ring->dev)->gen >= 8) {
-		intel_ring_emit(ring, 0); /* upper addr */
-		intel_ring_emit(ring, 0); /* value */
-	} else {
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, MI_NOOP);
-	}
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
+
 	intel_ring_advance(ring);
 	return 0;
 }
@@ -1916,8 +1935,7 @@ static int gen6_ring_flush(struct intel_engine *ring,
 		return ret;
 
 	cmd = MI_FLUSH_DW;
-	if (INTEL_INFO(ring->dev)->gen >= 8)
-		cmd += 1;
+
 	/*
 	 * Bspec vol 1c.3 - blitter engine command streamer:
 	 * "If ENABLED, all TLBs will be invalidated once the flush
@@ -1929,13 +1947,8 @@ static int gen6_ring_flush(struct intel_engine *ring,
 			MI_FLUSH_DW_OP_STOREDW;
 	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-	if (INTEL_INFO(ring->dev)->gen >= 8) {
-		intel_ring_emit(ring, 0); /* upper addr */
-		intel_ring_emit(ring, 0); /* value */
-	} else {
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, MI_NOOP);
-	}
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 
 	if (IS_GEN7(dev) && !invalidate && flush)
@@ -2123,7 +2135,6 @@ int intel_init_bsd_ring(struct drm_device *dev)
 		/* gen6 bsd needs a special wa for tail updates */
 		if (IS_GEN6(dev))
 			ring->write_tail = gen6_bsd_ring_write_tail;
-		ring->flush = gen6_bsd_ring_flush;
 		ring->add_request = gen6_add_request;
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
@@ -2132,6 +2143,7 @@ int intel_init_bsd_ring(struct drm_device *dev)
 			ring->write_tail = gen8_write_tail_lrc;
 			ring->init = init_ring_common_lrc;
 		}
+		ring->flush = gen8_ring_flush;
 		ring->add_request = gen8_add_request;
 		ring->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
@@ -2140,6 +2152,7 @@ int intel_init_bsd_ring(struct drm_device *dev)
 			ring->dispatch_execbuffer =
 				gen8_ring_dispatch_execbuffer;
 		} else {
+			ring->flush = gen6_bsd_ring_flush;
 			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
 			ring->irq_get = gen6_ring_get_irq;
 			ring->irq_put = gen6_ring_put_irq;
@@ -2182,7 +2195,6 @@ int intel_init_blt_ring(struct drm_device *dev)
 
 	ring->write_tail = ring_write_tail;
 	ring->init = init_ring_common;
-	ring->flush = gen6_ring_flush;
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
@@ -2191,6 +2203,7 @@ int intel_init_blt_ring(struct drm_device *dev)
 			ring->write_tail = gen8_write_tail_lrc;
 			ring->init = init_ring_common_lrc;
 		}
+		ring->flush = gen8_ring_flush;
 		ring->add_request = gen8_add_request;
 		ring->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
@@ -2198,6 +2211,7 @@ int intel_init_blt_ring(struct drm_device *dev)
 		ring->irq_put = gen8_ring_put_irq;
 		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
 	} else {
+		ring->flush = gen6_ring_flush;
 		ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
@@ -2223,7 +2237,6 @@ int intel_init_vebox_ring(struct drm_device *dev)
 
 	ring->write_tail = ring_write_tail;
 	ring->init = init_ring_common;
-	ring->flush = gen6_ring_flush;
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
@@ -2232,6 +2245,7 @@ int intel_init_vebox_ring(struct drm_device *dev)
 			ring->write_tail = gen8_write_tail_lrc;
 			ring->init = init_ring_common_lrc;
 		}
+		ring->flush = gen8_ring_flush;
 		ring->add_request = gen8_add_request;
 		ring->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
@@ -2239,6 +2253,7 @@ int intel_init_vebox_ring(struct drm_device *dev)
 		ring->irq_put = gen8_ring_put_irq;
 		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
 	} else {
+		ring->flush = gen6_ring_flush;
 		ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
 		ring->irq_get = hsw_vebox_get_irq;
 		ring->irq_put = hsw_vebox_put_irq;