@@ -549,8 +549,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
i915_gem_request_get_seqno(work->flip_queued_req),
dev_priv->next_seqno,
work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
- i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
- i915_gem_request_get_seqno(work->flip_queued_req)));
+ i915_gem_request_completed(work->flip_queued_req, true));
} else
seq_printf(m, "Flip not associated with any ring\n");
seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
@@ -2072,6 +2072,12 @@ static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
*pdst = src;
}
+/*
+ * XXX: i915_gem_request_completed should be here but currently needs the
+ * definition of i915_seqno_passed() which is below. It will be moved in
+ * a later patch when the call to i915_seqno_passed() is obsoleted...
+ */
+
struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
struct drm_file *file;
@@ -3130,6 +3136,18 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
}
}
+static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
+ bool lazy_coherency)
+{
+ u32 seqno;
+
+ BUG_ON(req == NULL);
+
+ seqno = req->ring->get_seqno(req->ring, lazy_coherency);
+
+ return i915_seqno_passed(seqno, req->seqno);
+}
+
static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
struct drm_i915_gem_request *req)
{
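
For readers following the conversion: the helper added above folds the ring->get_seqno() read and the i915_seqno_passed() comparison, which every call site below used to open-code, into a single query on the request. A minimal sketch of a caller, assuming only the declarations visible in this patch; my_poll_request() is purely illustrative and not part of the driver:

	/* Illustrative only: busy-poll until a request completes. Real callers
	 * such as __i915_wait_request() below sleep, and also deal with GPU
	 * reset, signals and timeouts rather than spinning.
	 */
	static void my_poll_request(struct drm_i915_gem_request *req)
	{
		/* lazy_coherency = false asks for a fresh, coherent seqno read */
		while (!i915_gem_request_completed(req, false))
			cpu_relax();
	}
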
@@ -1223,8 +1223,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
- if (i915_seqno_passed(ring->get_seqno(ring, true),
- i915_gem_request_get_seqno(req)))
+ if (i915_gem_request_completed(req, true))
return 0;
timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
@@ -1260,8 +1259,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
break;
}
- if (i915_seqno_passed(ring->get_seqno(ring, false),
- i915_gem_request_get_seqno(req))) {
+ if (i915_gem_request_completed(req, false)) {
ret = 0;
break;
}
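
Note the two different lazy_coherency arguments in __i915_wait_request(): the opportunistic pre-check passes true, since a cheap and possibly stale status-page read is acceptable before committing to a wait, while the check inside the wait loop passes false to force a coherent read after being woken. A rough sketch, under assumptions, of how a ring's get_seqno() hook might honor the flag; example_force_seqno_coherency() is a hypothetical stand-in for a platform-specific workaround, not driver code:

	/* Sketch under assumptions, not the driver's implementation. */
	static u32 example_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
	{
		if (!lazy_coherency)
			example_force_seqno_coherency(ring); /* hypothetical: make pending seqno writes visible */

		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
	}
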
@@ -2333,8 +2331,7 @@ i915_gem_object_retire(struct drm_i915_gem_object *obj)
if (ring == NULL)
return;
- if (i915_seqno_passed(ring->get_seqno(ring, true),
- i915_gem_request_get_seqno(obj->last_read_req)))
+ if (i915_gem_request_completed(obj->last_read_req, true))
i915_gem_object_move_to_inactive(obj);
}
@@ -2601,12 +2598,9 @@ struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *ring)
{
struct drm_i915_gem_request *request;
- u32 completed_seqno;
-
- completed_seqno = ring->get_seqno(ring, false);
list_for_each_entry(request, &ring->request_list, list) {
- if (i915_seqno_passed(completed_seqno, request->seqno))
+ if (i915_gem_request_completed(request, false))
continue;
return request;
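
One behavioural note on this hunk: the locally cached completed_seqno is gone, so each loop iteration now performs its own seqno read through the request. Expanding the helper defined earlier (an observation from its body, since this patch adds no extra caching), the check inside the loop is effectively:

	if (i915_seqno_passed(request->ring->get_seqno(request->ring, false),
			      request->seqno))
		continue;
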
@@ -2747,15 +2741,11 @@ void i915_gem_request_unreference_irq(struct drm_i915_gem_request *req)
void
i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
{
- uint32_t seqno;
-
if (list_empty(&ring->request_list))
return;
WARN_ON(i915_verify_lists(ring->dev));
- seqno = ring->get_seqno(ring, true);
-
/* Move any buffers on the active list that are no longer referenced
* by the ringbuffer to the flushing/inactive lists as appropriate,
* before we free the context associated with the requests.
@@ -2767,8 +2757,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
struct drm_i915_gem_object,
ring_list);
- if (!i915_seqno_passed(seqno,
- i915_gem_request_get_seqno(obj->last_read_req)))
+ if (!i915_gem_request_completed(obj->last_read_req, true))
break;
i915_gem_object_move_to_inactive(obj);
@@ -2783,7 +2772,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
struct drm_i915_gem_request,
list);
- if (!i915_seqno_passed(seqno, request->seqno))
+ if (!i915_gem_request_completed(request, true))
break;
trace_i915_gem_request_retire(request);
@@ -2810,8 +2799,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
}
if (unlikely(ring->trace_irq_req &&
- i915_seqno_passed(seqno,
- i915_gem_request_get_seqno(ring->trace_irq_req)))) {
+ i915_gem_request_completed(ring->trace_irq_req, true))) {
ring->irq_put(ring);
i915_gem_request_assign(&ring->trace_irq_req, NULL);
}
@@ -2749,8 +2749,7 @@ static bool
ring_idle(struct intel_engine_cs *ring)
{
return (list_empty(&ring->request_list) ||
- i915_seqno_passed(ring->get_seqno(ring, false),
- i915_gem_request_get_seqno(ring_last_request(ring))));
+ i915_gem_request_completed(ring_last_request(ring), false));
}
static bool
@@ -9670,11 +9670,7 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
if (work->flip_ready_vblank == 0) {
if (work->flip_queued_ring) {
- uint32_t s1 = work->flip_queued_ring->get_seqno(
- work->flip_queued_ring, true);
- uint32_t s2 = i915_gem_request_get_seqno(
- work->flip_queued_req);
- if (!i915_seqno_passed(s1, s2))
+ if (!i915_gem_request_completed(work->flip_queued_req, true))
return false;
i915_gem_request_unreference_irq(work->flip_queued_req);