@@ -3049,6 +3049,10 @@ int i915_gpu_idle(struct drm_device *dev)
/* Flush everything onto the inactive list. */
for_each_ring(ring, dev_priv, i) {
+ ret = I915_SCHEDULER_FLUSH_ALL(ring, true);
+ if (ret < 0)
+ return ret;
+
ret = i915_switch_context(ring, ring->default_context);
if (ret)
return ret;
@@ -4088,7 +4092,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
struct intel_engine_cs *ring = NULL;
unsigned reset_counter;
u32 seqno = 0;
- int ret;
+ int i, ret;
ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
if (ret)
@@ -4098,6 +4102,16 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (ret)
return ret;
+ for_each_ring(ring, dev_priv, i) {
+ /* Need a mechanism to flush out scheduler entries that were
+ * submitted more than 'recent_enough' time ago as well! In the
+ * meantime, just flush everything out to ensure that entries
+ * cannot sit around indefinitely. */
+ ret = I915_SCHEDULER_FLUSH_ALL(ring, false);
+ if (ret < 0)
+ return ret;
+ }
+
spin_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
if (time_after_eq(request->emitted_jiffies, recent_enough))
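The comment in the throttle path above notes that, ideally, only scheduler entries queued more than 'recent_enough' ago would be flushed. As a rough sketch of what such an age-based helper could look like (the dev_priv->scheduler pointer, the per-ring 'node_queue' list, the 'stamp' and 'link' members and i915_scheduler_submit() are illustrative names only, not defined by this patch):

/* Hypothetical sketch: flush only entries queued before 'target' jiffies.
 * The queue layout and submit helper are assumed, not part of this patch. */
int i915_scheduler_flush_stamp(struct intel_engine_cs *ring,
			       unsigned long target, bool is_locked)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct i915_scheduler *scheduler = dev_priv->scheduler;
	struct i915_scheduler_queue_entry *node, *tmp;
	int flushed = 0;

	list_for_each_entry_safe(node, tmp,
				 &scheduler->node_queue[ring->id], link) {
		/* Skip entries newer than the requested cut-off */
		if (time_after_eq(node->stamp, target))
			continue;

		/* Assumed to hand the entry to the hardware and update its
		 * bookkeeping */
		i915_scheduler_submit(ring, node, is_locked);
		flushed++;
	}

	return flushed;
}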
@@ -125,6 +125,13 @@ int i915_scheduler_flush_seqno(struct intel_engine_cs *ring, bool is_locked,
return 0;
}
+int i915_scheduler_flush(struct intel_engine_cs *ring, bool is_locked)
+{
+ /* Placeholder: push out all work that is queued in the scheduler but
+ * not yet submitted to the hardware for this ring. */
+
+ return 0;
+}
+
bool i915_scheduler_is_seqno_in_flight(struct intel_engine_cs *ring,
uint32_t seqno, bool *completed)
{
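i915_scheduler_flush() itself is still only a stub above. Under the same assumptions as the earlier sketch (per-ring 'node_queue' list, 'link' member, i915_scheduler_submit() helper, none of which are defined by this patch), a minimal "flush everything" implementation might look like:

/* Hypothetical sketch: push every entry still queued on this ring out to
 * the hardware; the queue layout and submit helper are assumed names. */
int i915_scheduler_flush(struct intel_engine_cs *ring, bool is_locked)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct i915_scheduler *scheduler = dev_priv->scheduler;
	struct i915_scheduler_queue_entry *node, *tmp;
	int count = 0;

	if (!scheduler)
		return 0;

	list_for_each_entry_safe(node, tmp,
				 &scheduler->node_queue[ring->id], link) {
		i915_scheduler_submit(ring, node, is_locked);
		count++;
	}

	return count;
}

Returning the number of flushed entries rather than a plain zero stays compatible with the call sites in i915_gem.c, which only treat negative values as errors.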
@@ -58,9 +58,13 @@ struct i915_scheduler_queue_entry {
};
#ifdef CONFIG_DRM_I915_SCHEDULER
+# define I915_SCHEDULER_FLUSH_ALL(ring, locked) \
+ i915_scheduler_flush(ring, locked)
+
# define I915_SCHEDULER_FLUSH_SEQNO(ring, locked, seqno) \
i915_scheduler_flush_seqno(ring, locked, seqno)
#else
+# define I915_SCHEDULER_FLUSH_ALL(ring, locked) 0
# define I915_SCHEDULER_FLUSH_SEQNO(ring, locked, seqno) 0
#endif
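One reading of these wrappers (not spelled out in the patch): making the CONFIG_DRM_I915_SCHEDULER=n variants expand to the constant 0 keeps the call sites in i915_gem.c unconditional. With the scheduler configured out, the new code in i915_gpu_idle(), for example, reduces after preprocessing to:

	/* Expansion with CONFIG_DRM_I915_SCHEDULER unset: the wrapper becomes
	 * the constant 0, so no scheduler symbol is referenced and the error
	 * path is never taken. */
	ret = 0;
	if (ret < 0)
		return ret;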
@@ -81,6 +85,7 @@ struct i915_scheduler {
int i915_scheduler_fly_seqno(struct intel_engine_cs *ring, uint32_t seqno);
int i915_scheduler_remove(struct intel_engine_cs *ring);
+int i915_scheduler_flush(struct intel_engine_cs *ring, bool is_locked);
int i915_scheduler_flush_seqno(struct intel_engine_cs *ring,
bool is_locked, uint32_t seqno);
bool i915_scheduler_is_seqno_in_flight(struct intel_engine_cs *ring,
From: John Harrison <John.C.Harrison@Intel.com>

When requesting that all GPU work is completed, it is now necessary to
get the scheduler involved in order to flush out work that is queued
but not yet submitted.
---
 drivers/gpu/drm/i915/i915_gem.c       | 16 +++++++++++++++-
 drivers/gpu/drm/i915/i915_scheduler.c |  7 +++++++
 drivers/gpu/drm/i915/i915_scheduler.h |  5 +++++
 3 files changed, 27 insertions(+), 1 deletion(-)