@@ -1922,6 +1922,9 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
struct drm_i915_gem_request {
struct kref ref;
+ /** Set once this request's seqno is known to have passed; updated lazily by i915_gem_complete_requests_ring(). */
+ bool complete;
+
/** On Which ring this request was generated */
struct intel_engine_cs *ring;
@@ -1955,6 +1958,8 @@ struct drm_i915_gem_request {
};
void i915_gem_request_free(struct kref *req_ref);
+void i915_gem_complete_requests_ring(struct intel_engine_cs *ring,
+ bool lazy_coherency);
static inline uint32_t
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
@@ -1995,11 +2000,16 @@ static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
*pdst = src;
}
-/*
- * XXX: i915_gem_request_completed should be here but currently needs the
- * definition of i915_seqno_passed() which is below. It will be moved in
- * a later patch when the call to i915_seqno_passed() is obsoleted...
- */
+static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
+ bool lazy_coherency)
+{
+ if (req->complete)
+ return true;
+
+ i915_gem_complete_requests_ring(req->ring, lazy_coherency);
+
+ return req->complete;
+}
struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
@@ -3064,18 +3074,6 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
}
}
-static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
- bool lazy_coherency)
-{
- u32 seqno;
-
- BUG_ON(req == NULL);
-
- seqno = req->ring->get_seqno(req->ring, lazy_coherency);
-
- return i915_seqno_passed(seqno, req->seqno);
-}
-
static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
struct drm_i915_gem_request *req)
{
@@ -2643,6 +2643,27 @@ void i915_gem_request_unreference_irq(struct drm_i915_gem_request *req)
spin_unlock_irqrestore(&ring->reqlist_lock, flags);
}
+void i915_gem_complete_requests_ring(struct intel_engine_cs *ring,
+ bool lazy_coherency)
+{
+ struct drm_i915_gem_request *req;
+ u32 seqno;
+
+ seqno = ring->get_seqno(ring, lazy_coherency);
+ if (seqno == ring->last_read_seqno)
+ return;
+
+ list_for_each_entry(req, &ring->request_list, list) {
+ if (req->complete)
+ continue;
+
+ if (i915_seqno_passed(seqno, req->seqno))
+ req->complete = true;
+ }
+
+ ring->last_read_seqno = seqno;
+}
+
/**
* This function clears the request list as sequence numbers are passed.
*/
@@ -808,6 +808,7 @@ static int logical_ring_alloc_request(struct intel_engine_cs *ring,
kref_init(&request->ref);
request->ring = ring;
+ request->complete = false;
ret = i915_gem_get_seqno(ring->dev, &request->seqno);
if (ret) {
@@ -2039,6 +2039,7 @@ intel_ring_alloc_request(struct intel_engine_cs *ring)
kref_init(&request->ref);
request->ring = ring;
+ request->complete = false;
ret = i915_gem_get_seqno(ring->dev, &request->seqno);
if (ret) {
@@ -2131,6 +2132,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
}
+ ring->last_read_seqno = 0;
ring->set_seqno(ring, seqno);
ring->hangcheck.seqno = seqno;
}
@@ -271,6 +271,9 @@ struct intel_engine_cs {
bool gpu_caches_dirty;
bool fbc_dirty;
+ /* Last seqno sampled by i915_gem_complete_requests_ring(); lets it skip rescanning the request list when the hardware seqno has not advanced. */
+ u32 last_read_seqno;
+
wait_queue_head_t irq_queue;
struct intel_context *default_context;