@@ -2728,7 +2728,7 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
-                         struct intel_engine_cs *to);
+                         struct drm_i915_gem_request *to_req);
 void i915_vma_move_to_active(struct i915_vma *vma,
                              struct intel_engine_cs *ring);
 int i915_gem_dumb_create(struct drm_file *file_priv,
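Nothing is lost by narrowing the interface this way: a request is bound to exactly one engine, so the engine can always be recovered from the request, which is exactly what the implementation hunk below does with to_req->ring. A hypothetical helper (not part of the patch, field name taken from that hunk) makes the equivalence explicit:

/*
 * Hypothetical helper, not in the patch: a request maps to at most
 * one engine, so the old (obj, engine) pair is fully recoverable
 * from the new (obj, request) pair.
 */
static inline struct intel_engine_cs *
sync_target_engine(struct drm_i915_gem_request *to_req)
{
        return to_req ? to_req->ring : NULL;
}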
@@ -2951,7 +2951,7 @@ out:
  * i915_gem_object_sync - sync an object to a ring.
  *
  * @obj: object which may be in use on another ring.
- * @to: ring we wish to use the object on. May be NULL.
+ * @to_req: request we wish to use the object for. May be NULL.
  *
  * This code is meant to abstract object synchronization with the GPU.
  * Calling with NULL implies synchronizing the object with the CPU
@@ -2961,8 +2961,9 @@ out:
  */
 int
 i915_gem_object_sync(struct drm_i915_gem_object *obj,
-                     struct intel_engine_cs *to)
+                     struct drm_i915_gem_request *to_req)
 {
+        struct intel_engine_cs *to = to_req ? to_req->ring : NULL;
         struct intel_engine_cs *from;
         u32 seqno;
         int ret, idx;
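With the engine derived up front, the rest of the function's body stays exactly as it was; only callers change. A minimal caller sketch of the new contract (hypothetical wrapper, not from the patch, assuming a request is already in hand):

/*
 * Hypothetical caller sketch: code that holds a request now passes
 * it straight through instead of dereferencing req->ring itself.
 * Passing NULL keeps the documented meaning: synchronize the
 * object with the CPU.
 */
static int prepare_obj_for_request(struct drm_i915_gem_object *obj,
                                   struct drm_i915_gem_request *req)
{
        return i915_gem_object_sync(obj, req);
}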
@@ -3948,7 +3949,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
         if (ret)
                 return ret;
 
-        ret = i915_gem_object_sync(obj, req->ring);
+        ret = i915_gem_object_sync(obj, req);
         if (ret)
                 return ret;
 
@@ -838,7 +838,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
         list_for_each_entry(vma, vmas, exec_list) {
                 struct drm_i915_gem_object *obj = vma->obj;
 
-                ret = i915_gem_object_sync(obj, req->ring);
+                ret = i915_gem_object_sync(obj, req);
                 if (ret)
                         return ret;
 
@@ -580,7 +580,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
         list_for_each_entry(vma, vmas, exec_list) {
                 struct drm_i915_gem_object *obj = vma->obj;
 
-                ret = i915_gem_object_sync(obj, req->ring);
+                ret = i915_gem_object_sync(obj, req);
                 if (ret)
                         return ret;
 
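The last two hunks show that both submission back ends, the legacy ring-buffer path in i915_gem_execbuffer_move_to_gpu() and the execlists path in execlists_move_to_gpu(), end up with the same loop shape. A condensed sketch of that shared pattern (flush bookkeeping and the surrounding function elided; the loop itself follows the patch context above):

/*
 * Condensed sketch of the shared move-to-gpu pattern after this
 * patch: every object bound for the batch is synchronized against
 * the submitting request rather than a bare engine.
 */
static int move_to_gpu_sketch(struct drm_i915_gem_request *req,
                              struct list_head *vmas)
{
        struct i915_vma *vma;
        int ret;

        list_for_each_entry(vma, vmas, exec_list) {
                ret = i915_gem_object_sync(vma->obj, req);
                if (ret)
                        return ret;
        }

        return 0;
}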