@@ -170,6 +170,8 @@ struct drm_i915_gem_request {
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
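+ /** Entry on a local resubmit list while the engine is preempted */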
+ struct list_head resubmit_link;
+
/** engine->request_list entry for this request */
struct list_head link;
@@ -580,6 +580,10 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine)
bool submit = false;
spin_lock_irqsave(&engine->timeline->lock, flags);
+
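+ /* Don't submit any new work while a preemption is in progress. */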
+ if (engine->preempt_requested)
+ goto out;
+
rb = engine->execlist_first;
while (rb) {
struct drm_i915_gem_request *cursor =
@@ -607,17 +611,71 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine)
nested_enable_signaling(last);
engine->execlist_first = rb;
}
+out:
spin_unlock_irqrestore(&engine->timeline->lock, flags);
return submit;
}
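+
+/*
+ * Walk the timeline backwards (newest request first) and unsubmit every
+ * request that has not yet completed, taking a reference that is dropped
+ * again on resubmission. Inserting at the head of @resubmit while walking
+ * backwards keeps the list in original submission order.
+ */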
+static void unsubmit_inflight_requests(struct intel_engine_cs *engine,
+ struct list_head *resubmit)
+{
+ struct drm_i915_gem_request *rq, *prev;
+
+ assert_spin_locked(&engine->timeline->lock);
+
+ list_for_each_entry_safe_reverse(rq, prev,
+ &engine->timeline->requests, link) {
+ if (!i915_gem_request_completed(rq)) {
+ i915_gem_request_get(rq);
+ __i915_gem_request_unsubmit(rq);
+ list_add(&rq->resubmit_link, resubmit);
+ }
+ }
+}
+
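+/*
+ * Put the requests collected by unsubmit_inflight_requests() back in
+ * flight in their original submission order, dropping the reference
+ * taken at unsubmit time.
+ */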
+static void resubmit_inflight_requests(struct intel_engine_cs *engine,
+ struct list_head *resubmit)
+{
+ struct drm_i915_gem_request *rq, *next;
+
+ assert_spin_locked(&engine->timeline->lock);
+
+ list_for_each_entry_safe(rq, next, resubmit, resubmit_link) {
+ list_del(&rq->resubmit_link);
+ __i915_gem_request_submit(rq);
+ i915_gem_request_put(rq);
+ }
+}
+
static void i915_guc_irq_handler(unsigned long data)
{
struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
struct execlist_port *port = engine->execlist_port;
struct drm_i915_gem_request *rq;
+ unsigned long flags;
bool submit;
+ LIST_HEAD(resubmit);
+
+ spin_lock_irqsave(&engine->timeline->lock, flags);
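+ /*
+ * If the preemption we requested has completed, collect the preempted
+ * requests for later resubmission, clear the preemption marker in the
+ * hardware status page and empty the submission ports, dropping the
+ * references to requests that managed to complete beforehand.
+ */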
+ if (engine->preempt_requested &&
+ intel_engine_is_preempt_finished(engine)) {
+ unsubmit_inflight_requests(engine, &resubmit);
+ engine->preempt_requested = false;
+ intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
+
+ rq = port[0].request;
+ while (rq && i915_gem_request_completed(rq)) {
+ i915_gem_request_put(rq);
+ if (port != engine->execlist_port)
+ break;
+ rq = (++port)->request;
+ }
+ port = engine->execlist_port;
+ port[0].request = NULL;
+ port[1].request = NULL;
+ }
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
do {
rq = port[0].request;
@@ -632,6 +690,12 @@ static void i915_guc_irq_handler(unsigned long data)
if (!port[1].request)
submit = i915_guc_dequeue(engine);
} while (submit);
+
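+ /*
+ * New work (potentially the request responsible for the preemption)
+ * has been given a chance to run first; now put the preempted
+ * requests back on the engine timeline.
+ */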
+ if (!list_empty(&resubmit)) {
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+ resubmit_inflight_requests(engine, &resubmit);
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ }
}
/*
We need to avoid sending new work if preemption is in progress. Once it
has finished, we need to identify and unsubmit the preempted workload,
submit the new workload (potentially the one responsible for triggering
the preemption) and then resubmit the preempted workload.

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_request.h    |  2 +
 drivers/gpu/drm/i915/i915_guc_submission.c | 64 ++++++++++++++++++++++++++++++
 2 files changed, 66 insertions(+)
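For reviewers reading this patch in isolation: engine->preempt_requested
and intel_engine_is_preempt_finished() come from earlier patches in this
series. As a hypothetical sketch (not part of this patch), assuming the
GuC signals completion by writing a non-zero value into the same
I915_GEM_HWS_PREEMPT_INDEX status page slot that this patch clears, the
check could look like:

static bool intel_engine_is_preempt_finished(struct intel_engine_cs *engine)
{
	/* Sketch only: poll the preempt marker in the hardware status page. */
	return intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) != 0;
}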