@@ -2151,7 +2151,8 @@ struct drm_i915_gem_request {
struct intel_context *ctx;
struct intel_ringbuffer *ringbuf;
- /** Batch buffer related to this request if any */
+ /** Batch buffer related to this request if any (used for
+  * error state dump only) */
struct drm_i915_gem_object *batch_obj;
/** Time at which this request was emitted, in jiffies. */
@@ -2466,6 +2466,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
* inactive_list and lose its active reference. Hence we do not need
* to explicitly hold another reference here.
*/
+ WARN_ON(request->batch_obj && obj);
request->batch_obj = obj;
if (!i915.enable_execlists) {
@@ -4856,8 +4857,16 @@ i915_gem_init_hw(struct drm_device *dev)
/* Now it is safe to go back round and do everything else: */
for_each_ring(ring, dev_priv, i) {
+ struct drm_i915_gem_request *req;
+
WARN_ON(!ring->default_context);
+ ret = dev_priv->gt.alloc_request(ring, ring->default_context, &req);
+ if (ret) {
+ i915_gem_cleanup_ringbuffer(dev);
+ return ret;
+ }
+
if (ring->id == RCS) {
for (i = 0; i < NUM_L3_SLICES(dev); i++)
i915_gem_l3_remap(ring, i);
@@ -4866,6 +4875,7 @@ i915_gem_init_hw(struct drm_device *dev)
ret = i915_ppgtt_init_ring(ring);
if (ret && ret != -EIO) {
DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
+ i915_gem_request_unreference(req);
i915_gem_cleanup_ringbuffer(dev);
return ret;
}
@@ -4873,8 +4883,16 @@ i915_gem_init_hw(struct drm_device *dev)
ret = i915_gem_context_enable(ring);
if (ret && ret != -EIO) {
DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
+ i915_gem_request_unreference(req);
i915_gem_cleanup_ringbuffer(dev);
+ return ret;
+ }
+ ret = i915_add_request_no_flush(ring);
+ if (ret) {
+ DRM_ERROR("Add request ring #%d failed: %d\n", i, ret);
+ i915_gem_request_unreference(req);
+ i915_gem_cleanup_ringbuffer(dev);
return ret;
}
}
@@ -1211,16 +1211,11 @@ int i915_ppgtt_init_ring(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- int ret = 0;
if (!ppgtt)
return 0;
- ret = ppgtt->switch_mm(ppgtt, ring);
- if (ret != 0)
- return ret;
-
- return ret;
+ return ppgtt->switch_mm(ppgtt, ring);
}
struct i915_hw_ppgtt *
@@ -173,7 +173,8 @@ int i915_gem_render_state_init(struct intel_engine_cs *ring)
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
- ret = __i915_add_request(ring, NULL, so.obj, true);
+ WARN_ON(ring->outstanding_lazy_request->batch_obj);
+ ring->outstanding_lazy_request->batch_obj = so.obj;
- /* __i915_add_request moves object to inactive if it fails */
+ /* so.obj is now tracked as the request's batch_obj instead */
out:
i915_gem_render_state_fini(&so);
@@ -1592,8 +1592,6 @@ int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
{
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
struct render_state so;
- struct drm_i915_file_private *file_priv = ctx->file_priv;
- struct drm_file *file = file_priv ? file_priv->file : NULL;
int ret;
ret = i915_gem_render_state_prepare(ring, &so);
@@ -1612,9 +1610,9 @@ int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
- ret = __i915_add_request(ring, file, so.obj, true);
- /* intel_logical_ring_add_request moves object to inactive if it
- * fails */
+ WARN_ON(ring->outstanding_lazy_request->batch_obj);
+ ring->outstanding_lazy_request->batch_obj = so.obj;
+ /* NOTE: __i915_add_request is no longer called here; so.obj is tracked via the request's batch_obj instead */
out:
i915_gem_render_state_fini(&so);
return ret;