@@ -828,7 +828,7 @@ err:
}
static int
-i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
+i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
struct i915_vma *vma;
@@ -838,7 +838,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
- ret = i915_gem_object_sync(obj, ring);
+ ret = i915_gem_object_sync(obj, req->ring);
if (ret)
return ret;
@@ -849,7 +849,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
}
if (flush_chipset)
- i915_gem_chipset_flush(ring->dev);
+ i915_gem_chipset_flush(req->ring->dev);
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
@@ -857,7 +857,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
- return intel_ring_invalidate_all_caches(ring);
+ return intel_ring_invalidate_all_caches(req->ring);
}
static bool
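For reference, the only request fields this conversion relies on are the ones dereferenced above and in the execlists hunks below. A minimal, illustrative excerpt of struct drm_i915_gem_request under that assumption (not the full definition):

struct drm_i915_gem_request {
	/* Engine the request was submitted to; replaces the old explicit
	 * 'ring' argument, hence req->ring in the hunks above. */
	struct intel_engine_cs *ring;

	/* Ringbuffer and context used by the execlists path below
	 * (req->ringbuf, req->ctx). */
	struct intel_ringbuffer *ringbuf;
	struct intel_context *ctx;

	/* ... remaining members elided ... */
};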
@@ -1186,7 +1186,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
}
}
- ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
+ ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
if (ret)
goto error;
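On the caller side, the submission parameter block already carries the request, so only params->request needs to be forwarded. A sketch of the two members this patch assumes in struct i915_execbuffer_params (both are dereferenced in the hunks shown; everything else is elided):

struct i915_execbuffer_params {
	/* ... other execbuffer parameters elided ... */
	struct intel_context		*ctx;		/* previously passed to execlists_move_to_gpu() */
	struct drm_i915_gem_request	*request;	/* now the single handle passed down */
};

The remaining hunks apply the same conversion to the execlists path in intel_lrc.c.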
@@ -569,11 +569,9 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
return 0;
}
-static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
+static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
- struct intel_engine_cs *ring = ringbuf->ring;
struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
@@ -582,7 +580,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
- ret = i915_gem_object_sync(obj, ring);
+ ret = i915_gem_object_sync(obj, req->ring);
if (ret)
return ret;
@@ -598,7 +596,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
- return logical_ring_invalidate_all_caches(ringbuf, ctx);
+ return logical_ring_invalidate_all_caches(req->ringbuf, req->ctx);
}
/**
@@ -677,7 +675,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
return -EINVAL;
}
- ret = execlists_move_to_gpu(ringbuf, params->ctx, vmas);
+ ret = execlists_move_to_gpu(params->request, vmas);
if (ret)
return ret;
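Stitched together from the context and '+' lines above, the execlists helper ends up looking roughly like this after the patch; the code between the hunks is untouched by the change and only sketched as comments:

static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
				 struct list_head *vmas)
{
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		/* Sync against whichever engine the request targets. */
		ret = i915_gem_object_sync(obj, req->ring);
		if (ret)
			return ret;

		/* ... flush_chipset/flush_domains bookkeeping elided
		 * (unchanged by this patch) ... */
	}

	/* ... chipset/GTT flush handling elided (unchanged) ... */

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return logical_ring_invalidate_all_caches(req->ringbuf, req->ctx);
}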