@@ -2214,14 +2214,35 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
+
+bool i915_scheduler_is_seqno_in_flight(struct intel_engine_cs *ring,
+ uint32_t seqno, bool *completed);
+
/**
* Returns true if seq1 is later than seq2.
*/
static inline bool
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+i915_seqno_passed(struct intel_engine_cs *ring, uint32_t seq1, uint32_t seq2)
{
+#ifdef CONFIG_DRM_I915_SCHEDULER
+ bool completed;
+
+ if (i915_scheduler_is_seqno_in_flight(ring, seq2, &completed))
+ return completed;
+#endif
+
return (int32_t)(seq1 - seq2) >= 0;
}
+static inline int32_t
+i915_compare_seqno_values(uint32_t seq1, uint32_t seq2)
+{
+	int32_t diff = (int32_t)(seq1 - seq2);
+
+ if (!diff)
+ return 0;
+
+ return (diff > 0) ? 1 : -1;
+}
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
@@ -1165,7 +1165,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");
- if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
+ if (i915_seqno_passed(ring, ring->get_seqno(ring, true), seqno))
return 0;
timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
@@ -1201,7 +1201,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
break;
}
- if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
+ if (i915_seqno_passed(ring, ring->get_seqno(ring, false), seqno)) {
ret = 0;
break;
}
@@ -2243,7 +2243,7 @@ i915_gem_object_retire(struct drm_i915_gem_object *obj)
if (ring == NULL)
return;
- if (i915_seqno_passed(ring->get_seqno(ring, true),
+ if (i915_seqno_passed(ring, ring->get_seqno(ring, true),
obj->last_read_seqno))
i915_gem_object_move_to_inactive(obj);
}
@@ -2489,7 +2489,7 @@ i915_gem_find_active_request(struct intel_engine_cs *ring)
completed_seqno = ring->get_seqno(ring, false);
list_for_each_entry(request, &ring->request_list, list) {
- if (i915_seqno_passed(completed_seqno, request->seqno))
+ if (i915_seqno_passed(ring, completed_seqno, request->seqno))
continue;
return request;
@@ -2620,7 +2620,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
*/
list_for_each_entry_safe(req, req_next, &ring->request_list, list) {
- if (!i915_seqno_passed(seqno, req->seqno))
+ if (!i915_seqno_passed(ring, seqno, req->seqno))
continue;
trace_i915_gem_request_retire(ring, req->seqno);
@@ -2639,14 +2639,14 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
* before we free the context associated with the requests.
*/
list_for_each_entry_safe(obj, obj_next, &ring->active_list, ring_list) {
- if (!i915_seqno_passed(seqno, obj->last_read_seqno))
+ if (!i915_seqno_passed(ring, seqno, obj->last_read_seqno))
continue;
i915_gem_object_move_to_inactive(obj);
}
if (unlikely(ring->trace_irq_seqno &&
- i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
+ i915_seqno_passed(ring, seqno, ring->trace_irq_seqno))) {
ring->irq_put(ring);
ring->trace_irq_seqno = 0;
}
@@ -2750,7 +2750,7 @@ static bool
ring_idle(struct intel_engine_cs *ring, u32 seqno)
{
return (list_empty(&ring->request_list) ||
- i915_seqno_passed(seqno, ring_last_seqno(ring)));
+ i915_seqno_passed(ring, seqno, ring_last_seqno(ring)));
}
static bool
@@ -2862,7 +2862,7 @@ static int semaphore_passed(struct intel_engine_cs *ring)
if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
return -1;
- return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
+	return i915_seqno_passed(signaller, signaller->get_seqno(signaller, false), seqno);
}
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
@@ -49,6 +49,26 @@ int i915_scheduler_init(struct drm_device *dev)
return 0;
}
+bool i915_scheduler_is_seqno_in_flight(struct intel_engine_cs *ring,
+ uint32_t seqno, bool *completed)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct i915_scheduler *scheduler = dev_priv->scheduler;
+ bool found = false;
+ unsigned long flags;
+
+ if (!scheduler)
+ return false;
+
+ spin_lock_irqsave(&scheduler->lock, flags);
+
+ /* Do stuff... */
+
+ spin_unlock_irqrestore(&scheduler->lock, flags);
+
+ return found;
+}
+
#else /* CONFIG_DRM_I915_SCHEDULER */
int i915_scheduler_init(struct drm_device *dev)
@@ -35,6 +35,9 @@ struct i915_scheduler {
uint32_t index;
};
+bool i915_scheduler_is_seqno_in_flight(struct intel_engine_cs *ring,
+ uint32_t seqno, bool *completed);
+
#endif /* CONFIG_DRM_I915_SCHEDULER */
#endif /* _I915_SCHEDULER_H_ */
From: John Harrison <John.C.Harrison@Intel.com>

The GPU scheduler can cause seqno values to become out of order. This
means that a straight forward 'is seqno X > seqno Y' test is no longer
valid. Instead, a call into the scheduler must be made to see if the
value being queried is known to be out of order.
---
 drivers/gpu/drm/i915/i915_drv.h       | 23 ++++++++++++++++++++++-
 drivers/gpu/drm/i915/i915_gem.c       | 14 +++++++-------
 drivers/gpu/drm/i915/i915_irq.c       |  4 ++--
 drivers/gpu/drm/i915/i915_scheduler.c | 20 ++++++++++++++++++++
 drivers/gpu/drm/i915/i915_scheduler.h |  3 +++
 5 files changed, 54 insertions(+), 10 deletions(-)