@@ -2170,7 +2170,7 @@ int __i915_add_request(struct intel_engine *ring,
u32 request_ring_position, request_start;
int ret;
- request_start = intel_ring_get_tail(ring);
+ request_start = intel_ringbuffer_get_tail(ring, ctx);
/*
* Emit any outstanding flushes - execbuf can fail to emit the flush
* after having emitted the batchbuffer command. Hence we need to fix
@@ -2191,7 +2191,7 @@ int __i915_add_request(struct intel_engine *ring,
* GPU processing the request, we never over-estimate the
* position of the head.
*/
- request_ring_position = intel_ring_get_tail(ring);
+ request_ring_position = intel_ringbuffer_get_tail(ring, ctx);
ret = ring->add_request(ring, ctx);
if (ret)
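
Both hunks in __i915_add_request() now read the tail through the ctx-aware
helper, because with per-context ringbuffers the engine-global tail is no
longer authoritative. The lookup behind intel_ringbuffer_get() is outside
this excerpt; a minimal sketch of the behaviour its callers rely on, where
ctx->engine[ring->id].ringbuf is an assumed field name (only
ring->default_ringbuf actually appears in this patch):

struct intel_ringbuffer *
intel_ringbuffer_get(struct intel_engine *ring, struct i915_hw_context *ctx)
{
	/* Assumed per-context lookup: a context that owns a ringbuffer
	 * gets it back; everything else falls through to the engine's
	 * default buffer.  Callers depend on never receiving NULL. */
	if (ctx && ctx->engine[ring->id].ringbuf)
		return ctx->engine[ring->id].ringbuf;

	return &ring->default_ringbuf;
}
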
@@ -982,14 +982,15 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
struct i915_hw_context *ctx)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret, i;
+ struct intel_ringbuffer *ringbuf;
+ int i;
if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
return 0;
- ret = intel_ringbuffer_begin(ring, ctx, 4 * 3);
- if (ret)
- return ret;
+ ringbuf = intel_ringbuffer_begin(ring, ctx, 4 * 3);
+	if (IS_ERR(ringbuf))
+ return PTR_ERR(ringbuf);
for (i = 0; i < 4; i++) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
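
This hunk shows the new calling convention: intel_ringbuffer_begin() hands
back the ringbuffer it prepared, with any errno folded into the pointer via
ERR_PTR(). Since the function returns either a valid pointer or an
ERR_PTR-encoded errno, never NULL, a plain IS_ERR() check suffices:

	/* ERR_PTR(-ENOSPC) is just (void *)-28, a value in the last page
	 * of the address space, which IS_ERR() recognizes and PTR_ERR()
	 * decodes back into the original errno. */
	ringbuf = intel_ringbuffer_begin(ring, ctx, 4 * 3);
	if (IS_ERR(ringbuf))
		return PTR_ERR(ringbuf);
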
@@ -1230,9 +1231,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ring == &dev_priv->ring[RCS] &&
mode != dev_priv->relative_constants_mode) {
- ret = intel_ringbuffer_begin(ring, ctx, 4);
- if (ret)
- goto err;
+ struct intel_ringbuffer *ringbuf;
+
+ ringbuf = intel_ringbuffer_begin(ring, ctx, 4);
+	if (IS_ERR(ringbuf)) {
+		ret = PTR_ERR(ringbuf);
+ goto err;
+ }
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
@@ -8832,7 +8832,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
* then do the cacheline alignment, and finally emit the
* MI_DISPLAY_FLIP.
*/
- ret = intel_ring_cacheline_align(ring);
+ ret = intel_ringbuffer_cacheline_align(ring, ring->default_context);
if (ret)
goto err_unpin;
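
The flip path keeps its int error convention; only the alignment helper
grows a context argument, with the default context passed explicitly. As a
worked example of the arithmetic intel_ringbuffer_cacheline_align() performs
further down in this excerpt (the tail value here is made up):

	u32 tail = 0x1238;	/* hypothetical tail offset, in bytes */
	int num_dwords = (64 - (tail & 63)) / sizeof(uint32_t);
	/* tail & 63 == 0x38 == 56, so (64 - 56) / 4 == 2 MI_NOOPs pad
	 * the ring and MI_DISPLAY_FLIP starts on a fresh cacheline. */
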
@@ -376,9 +376,9 @@ gen8_render_ring_flush(struct intel_engine *ring,
struct i915_hw_context *ctx,
u32 invalidate_domains, u32 flush_domains)
{
+ struct intel_ringbuffer *ringbuf;
u32 flags = 0;
u32 scratch_addr = ring->scratch.gtt_offset + 128;
- int ret;
flags |= PIPE_CONTROL_CS_STALL;
@@ -397,9 +397,9 @@ gen8_render_ring_flush(struct intel_engine *ring,
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
}
- ret = intel_ringbuffer_begin(ring, ctx, 6);
- if (ret)
- return ret;
+ ringbuf = intel_ringbuffer_begin(ring, ctx, 6);
+	if (IS_ERR(ringbuf))
+		return PTR_ERR(ringbuf);
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
intel_ring_emit(ring, flags);
@@ -735,11 +735,11 @@ static int
gen8_add_request(struct intel_engine *ring,
struct i915_hw_context *ctx)
{
- int ret;
+ struct intel_ringbuffer *ringbuf;
- ret = intel_ringbuffer_begin(ring, ctx, 4);
- if (ret)
- return ret;
+ ringbuf = intel_ringbuffer_begin(ring, ctx, 4);
+	if (IS_ERR(ringbuf))
+		return PTR_ERR(ringbuf);
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
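
The hunk cuts off after the first two of the four dwords reserved above. By
analogy with the add_request hooks on earlier generations, the remaining two
are presumably the seqno write and the user interrupt; a sketch of the
likely tail of the function, not its literal text:

	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ringbuffer_advance_and_submit(ring, ctx);
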
@@ -1744,33 +1744,37 @@ static int __intel_ring_prepare(struct intel_engine *ring,
return 0;
}
-int intel_ringbuffer_begin(struct intel_engine *ring,
- struct i915_hw_context *ctx,
- int num_dwords)
+struct intel_ringbuffer *
+intel_ringbuffer_begin(struct intel_engine *ring,
+ struct i915_hw_context *ctx,
+ int num_dwords)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct intel_ringbuffer *ringbuf = intel_ringbuffer_get(ring, ctx);
int ret;
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
- return ret;
+ return ERR_PTR(ret);
ret = __intel_ring_prepare(ring, ctx, num_dwords * sizeof(uint32_t));
if (ret)
- return ret;
+ return ERR_PTR(ret);
/* Preallocate the olr before touching the ring */
ret = intel_ring_alloc_seqno(ring);
if (ret)
- return ret;
+ return ERR_PTR(ret);
- __get_ringbuf(ring)->space -= num_dwords * sizeof(uint32_t);
- return 0;
+ ringbuf->space -= num_dwords * sizeof(uint32_t);
+
+ return ringbuf;
}
/* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct intel_engine *ring)
+int intel_ringbuffer_cacheline_align(struct intel_engine *ring,
+ struct i915_hw_context *ctx)
{
-	int num_dwords = (64 - (__get_ringbuf(ring)->tail & 63)) / sizeof(uint32_t);
+	int num_dwords = (64 - (intel_ringbuffer_get(ring, ctx)->tail & 63)) / sizeof(uint32_t);
int ret;
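
Returning the ringbuffer from begin() lets converted code write through the
handle directly instead of re-deriving it from the engine on every emit. A
minimal sketch using the ringbuf-based helpers declared in the header hunk
below:

	struct intel_ringbuffer *ringbuf;

	ringbuf = intel_ringbuffer_begin(ring, ctx, 2);
	if (IS_ERR(ringbuf))
		return PTR_ERR(ringbuf);

	intel_ringbuffer_emit(ringbuf, MI_NOOP);
	intel_ringbuffer_emit(ringbuf, MI_NOOP);
	intel_ringbuffer_advance(ringbuf);
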
@@ -1845,11 +1849,11 @@ static int gen8_ring_flush(struct intel_engine *ring,
u32 invalidate, u32 flush)
{
uint32_t cmd;
- int ret;
+ struct intel_ringbuffer *ringbuf;
- ret = intel_ringbuffer_begin(ring, ctx, 4);
- if (ret)
- return ret;
+ ringbuf = intel_ringbuffer_begin(ring, ctx, 4);
+	if (IS_ERR(ringbuf))
+		return PTR_ERR(ringbuf);
cmd = MI_FLUSH_DW + 1;
@@ -1905,11 +1909,11 @@ gen8_ring_dispatch_execbuffer(struct intel_engine *ring,
struct drm_i915_private *dev_priv = ring->dev->dev_private;
bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
!(flags & I915_DISPATCH_SECURE);
- int ret;
+ struct intel_ringbuffer *ringbuf;
- ret = intel_ringbuffer_begin(ring, ctx, 4);
- if (ret)
- return ret;
+ ringbuf = intel_ringbuffer_begin(ring, ctx, 4);
+	if (IS_ERR(ringbuf))
+		return PTR_ERR(ringbuf);
/* FIXME(BDW): Address space and security selectors. */
intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
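
This hunk also truncates before the batch address is emitted. Assuming the
usual BDW dispatch parameters (a 64-bit batch offset split across two
dwords), the continuation presumably looks like:

	intel_ring_emit(ring, lower_32_bits(offset));
	intel_ring_emit(ring, upper_32_bits(offset));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
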
@@ -284,26 +284,68 @@ intel_write_status_page(struct intel_engine *ring,
void intel_cleanup_ring(struct intel_engine *ring);
-int __must_check intel_ringbuffer_begin(struct intel_engine *ring,
- struct i915_hw_context *ctx, int n);
-#define intel_ring_begin(ring, n) intel_ringbuffer_begin(ring, NULL, n)
-int __must_check intel_ring_cacheline_align(struct intel_engine *ring);
-static inline void intel_ring_emit(struct intel_engine *ring,
- u32 data)
+struct intel_ringbuffer *
+intel_ringbuffer_get(struct intel_engine *ring,
+ struct i915_hw_context *ctx);
+
+struct intel_ringbuffer * __must_check
+intel_ringbuffer_begin(struct intel_engine *ring,
+ struct i915_hw_context *ctx, int n);
+
+static inline int __must_check
+intel_ring_begin(struct intel_engine *ring, int n)
{
- struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
+ struct intel_ringbuffer *ringbuf;
+
+	ringbuf = intel_ringbuffer_begin(ring, ring->default_context, n);
+ if (IS_ERR(ringbuf))
+		return PTR_ERR(ringbuf);
+
+ return 0;
+}
+
+int __must_check
+intel_ringbuffer_cacheline_align(struct intel_engine *ring,
+ struct i915_hw_context *ctx);
+static inline void
+intel_ringbuffer_emit(struct intel_ringbuffer *ringbuf, u32 data)
+{
iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
ringbuf->tail += 4;
}
-static inline void intel_ring_advance(struct intel_engine *ring)
+
+static inline void
+intel_ring_emit(struct intel_engine *ring, u32 data)
{
- struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
+ intel_ringbuffer_emit(&ring->default_ringbuf, data);
+}
+
+static inline void
+intel_ringbuffer_advance(struct intel_ringbuffer *ringbuf)
+{
ringbuf->tail &= ringbuf->size - 1;
}
-void intel_ringbuffer_advance_and_submit(struct intel_engine *ring,
- struct i915_hw_context *ctx);
+
+static inline void
+intel_ring_advance(struct intel_engine *ring)
+{
+ intel_ringbuffer_advance(&ring->default_ringbuf);
+}
+
+void
+intel_ringbuffer_advance_and_submit(struct intel_engine *ring,
+ struct i915_hw_context *ctx);
+
+static inline u32
+intel_ringbuffer_get_tail(struct intel_engine *ring,
+ struct i915_hw_context *ctx)
+{
+	struct intel_ringbuffer *ringbuf = intel_ringbuffer_get(ring, ctx);
+
+	return ringbuf->tail;
+}
int __must_check intel_ring_idle(struct intel_engine *ring);
void intel_ring_init_seqno(struct intel_engine *ring, u32 seqno);
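
With the inline shims above, legacy callers that only ever touch the
engine's default ringbuffer keep compiling unchanged; behind the scenes they
now route through ring->default_context and ring->default_ringbuf. A sketch
of such an unconverted caller:

	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
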
@@ -321,11 +363,6 @@ int intel_init_vebox_ring(struct drm_device *dev);
u32 intel_ring_get_active_head(struct intel_engine *ring);
void intel_ring_setup_status_page(struct intel_engine *ring);
-static inline u32 intel_ring_get_tail(struct intel_engine *ring)
-{
- return __get_ringbuf(ring)->tail;
-}
-
static inline u32 intel_ring_get_seqno(struct intel_engine *ring)
{
BUG_ON(ring->outstanding_lazy_seqno == 0);