@@ -550,7 +550,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 					   i915_gem_request_get_seqno(work->flip_queued_req),
 					   dev_priv->next_seqno,
 					   work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
-					   i915_gem_request_completed(work->flip_queued_req, true));
+					   i915_gem_request_completed(work->flip_queued_req));
 			} else
 				seq_printf(m, "Flip not associated with any ring\n");
 			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
@@ -2069,8 +2069,7 @@ static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
 	*pdst = src;
 }
 
-static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
-					      bool lazy_coherency)
+static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req)
 {
 	return req->complete;
 }
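For context, a minimal sketch of how a caller might use the simplified helper; the example_poll_request() wrapper and its busy-wait loop are illustrative assumptions, not code from this series:

/* Illustrative only: poll a request until it is marked complete.
 * The one-argument helper above just returns req->complete, so the
 * caller no longer passes a lazy_coherency hint (i.e. no choice
 * between a cached and a coherent seqno read at the call site).
 */
static void example_poll_request(struct drm_i915_gem_request *req)
{
	while (!i915_gem_request_completed(req))
		cpu_relax();	/* assumes req->complete is updated elsewhere */
}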
@@ -1248,7 +1248,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 
 	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
 
-	if (i915_gem_request_completed(req, true))
+	if (i915_gem_request_completed(req))
 		return 0;
 
 	timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
@@ -1289,7 +1289,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 			break;
 		}
 
-		if (i915_gem_request_completed(req, false)) {
+		if (i915_gem_request_completed(req)) {
 			ret = 0;
 			break;
 		}
@@ -2339,7 +2339,7 @@ i915_gem_object_retire(struct drm_i915_gem_object *obj)
 	if (obj->last_read_req == NULL)
 		return;
 
-	if (i915_gem_request_completed(obj->last_read_req, true))
+	if (i915_gem_request_completed(obj->last_read_req))
 		i915_gem_object_move_to_inactive(obj);
 }
 
@@ -2610,7 +2610,7 @@ i915_gem_find_active_request(struct intel_engine_cs *ring)
 	struct drm_i915_gem_request *request;
 
 	list_for_each_entry(request, &ring->request_list, list) {
-		if (i915_gem_request_completed(request, false))
+		if (i915_gem_request_completed(request))
 			continue;
 
 		return request;
@@ -2789,7 +2789,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 				       struct drm_i915_gem_object,
 				       ring_list);
 
-		if (!i915_gem_request_completed(obj->last_read_req, true))
+		if (!i915_gem_request_completed(obj->last_read_req))
 			break;
 
 		i915_gem_object_move_to_inactive(obj);
@@ -2804,7 +2804,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 					   struct drm_i915_gem_request,
 					   list);
 
-		if (!i915_gem_request_completed(request, true))
+		if (!i915_gem_request_completed(request))
 			break;
 
 		trace_i915_gem_request_retire(request);
@@ -2831,7 +2831,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 	}
 
 	if (unlikely(ring->trace_irq_req &&
-		     i915_gem_request_completed(ring->trace_irq_req, true))) {
+		     i915_gem_request_completed(ring->trace_irq_req))) {
 		ring->irq_put(ring);
 		i915_gem_request_assign(&ring->trace_irq_req, NULL);
 	}
@@ -2725,7 +2725,7 @@ static bool
 ring_idle(struct intel_engine_cs *ring)
 {
 	return (list_empty(&ring->request_list) ||
-		i915_gem_request_completed(ring_last_request(ring), false));
+		i915_gem_request_completed(ring_last_request(ring)));
 }
 
 static bool
@@ -9485,7 +9485,7 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
 
 	if (work->flip_ready_vblank == 0) {
 		if (work->flip_queued_ring) {
-			if (!i915_gem_request_completed(work->flip_queued_req, true))
+			if (!i915_gem_request_completed(work->flip_queued_req))
 				return false;
 
 			i915_gem_request_unreference_irq(work->flip_queued_req);