@@ -2566,10 +2566,9 @@ int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 int __i915_add_request(struct intel_engine_cs *ring,
                        struct drm_file *file,
-                       struct drm_i915_gem_object *batch_obj,
-                       u32 *seqno);
-#define i915_add_request(ring, seqno) \
-        __i915_add_request(ring, NULL, NULL, seqno)
+                       struct drm_i915_gem_object *batch_obj);
+#define i915_add_request(ring) \
+        __i915_add_request(ring, NULL, NULL)
 int __must_check i915_wait_request(struct drm_i915_gem_request *req);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
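
With the out-parameter gone, a caller that still needs the raw seqno can read it from the ring's outstanding lazy request before submitting. A minimal sketch, assuming only the fields visible in this patch (the locals are illustrative):

        struct drm_i915_gem_request *req = ring->outstanding_lazy_request;
        u32 seqno = req->seqno; /* the value formerly returned via *out_seqno */
        ret = i915_add_request(ring);
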
@@ -1108,7 +1108,7 @@ i915_gem_check_olr(struct drm_i915_gem_request *req)
        ret = 0;
        if (req == req->ring->outstanding_lazy_request)
-               ret = i915_add_request(req->ring, NULL);
+               ret = i915_add_request(req->ring);
        return ret;
 }
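
The wait path is now request-based end to end: i915_gem_check_olr() submits the ring's outstanding lazy request if that is the request about to be waited on, and i915_wait_request() takes the request directly. A hedged sketch of a hypothetical caller:

        ret = i915_gem_check_olr(req); /* flush the OLR if req is still unsubmitted */
        if (ret == 0)
                ret = i915_wait_request(req);
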
@@ -2328,8 +2328,7 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 int __i915_add_request(struct intel_engine_cs *ring,
                        struct drm_file *file,
-                       struct drm_i915_gem_object *obj,
-                       u32 *out_seqno)
+                       struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_request *request;
@@ -2430,8 +2429,6 @@ int __i915_add_request(struct intel_engine_cs *ring,
                intel_mark_busy(dev_priv->dev);
        }
-       if (out_seqno)
-               *out_seqno = request->seqno;
        return 0;
 }
@@ -990,7 +990,7 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
        ring->gpu_caches_dirty = true;
        /* Add a breadcrumb for the completion of the batch buffer */
-       (void)__i915_add_request(ring, file, obj, NULL);
+       (void)__i915_add_request(ring, file, obj);
 }
 static int
@@ -173,7 +173,7 @@ int i915_gem_render_state_init(struct intel_engine_cs *ring)
        i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
-       ret = __i915_add_request(ring, NULL, so.obj, NULL);
+       ret = __i915_add_request(ring, NULL, so.obj);
        /* __i915_add_request moves object to inactive if it fails */
 out:
        i915_gem_render_state_fini(&so);
@@ -1485,7 +1485,7 @@ int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
        i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
-       ret = __i915_add_request(ring, file, so.obj, NULL);
+       ret = __i915_add_request(ring, file, so.obj);
        /* intel_logical_ring_add_request moves object to inactive if it
         * fails */
 out:
@@ -218,7 +218,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
        BUG_ON(overlay->last_flip_req);
        i915_gem_request_assign(&overlay->last_flip_req, ring->outstanding_lazy_request);
-       ret = i915_add_request(ring, NULL);
+       ret = i915_add_request(ring);
        if (ret)
                return ret;
@@ -289,7 +289,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
        WARN_ON(overlay->last_flip_req);
        i915_gem_request_assign(&overlay->last_flip_req, ring->outstanding_lazy_request);
-       return i915_add_request(ring, NULL);
+       return i915_add_request(ring);
 }
 static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
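
For reference, the i915_gem_request_assign() helper used above is essentially a reference-counted pointer swap. A sketch of its semantics, assuming the i915_gem_request_reference()/unreference() helpers of this era:

        static inline void
        i915_gem_request_assign(struct drm_i915_gem_request **pdst,
                                struct drm_i915_gem_request *src)
        {
                if (src)
                        i915_gem_request_reference(src);
                if (*pdst)
                        i915_gem_request_unreference(*pdst);
                *pdst = src;
        }
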
@@ -2006,7 +2006,7 @@ int intel_ring_idle(struct intel_engine_cs *ring)
        /* We need to add any requests required to flush the objects and ring */
        if (ring->outstanding_lazy_request) {
-               ret = i915_add_request(ring, NULL);
+               ret = i915_add_request(ring);
                if (ret)
                        return ret;
        }
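
After flushing the lazy request, intel_ring_idle() goes on to wait for the last request on the ring. A hedged sketch of the function's tail, not part of this hunk, assuming the ring tracks its pending requests on a request_list:

        /* Wait upon the last request to be completed */
        if (list_empty(&ring->request_list))
                return 0;
        req = list_entry(ring->request_list.prev,
                         struct drm_i915_gem_request, list);
        return i915_wait_request(req);
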