diff mbox

[5/6] drm/i915/scheduler: Split insert_request

Message ID 20170518135944.13140-5-michal.winiarski@intel.com (mailing list archive)
State New, archived
Headers show

Commit Message

Michał Winiarski May 18, 2017, 1:59 p.m. UTC
We'd like to reuse the priolist lookup in request resubmission path,
let's split insert_request to make that happen.

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
---
 drivers/gpu/drm/i915/intel_lrc.c | 65 ++++++++++++++++++++++++----------------
 1 file changed, 39 insertions(+), 26 deletions(-)

Comments

Chris Wilson May 18, 2017, 2:27 p.m. UTC | #1
On Thu, May 18, 2017 at 03:59:43PM +0200, Michał Winiarski wrote:
> We'd like to reuse the priolist lookup in request resubmission path,
> let's split insert_request to make that happen.
> 
> Cc: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
> ---
>  drivers/gpu/drm/i915/intel_lrc.c | 65 ++++++++++++++++++++++++----------------
>  1 file changed, 39 insertions(+), 26 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 1255724..c43ca1b 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -627,20 +627,15 @@ static void intel_lrc_irq_handler(unsigned long data)
>  	intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
>  }
>  
> -static bool
> -insert_request(struct intel_engine_cs *engine,
> -	       struct i915_priotree *pt,
> -	       int prio)
> +static struct i915_priolist *
> +priolist_lookup(struct intel_engine_cs *engine, int prio, bool *first)
>  {
>  	struct i915_priolist *p;
>  	struct rb_node **parent, *rb;
> -	bool first = true;
>  
>  	if (unlikely(engine->no_priolist))
>  		prio = I915_PRIORITY_NORMAL;
> -
> -find_priolist:
> -	/* most positive priority is scheduled first, equal priorities fifo */
> +	*first = true;
>  	rb = NULL;
>  	parent = &engine->execlist_queue.rb_node;
>  	while (*parent) {
> @@ -650,10 +645,10 @@ insert_request(struct intel_engine_cs *engine,
>  			parent = &rb->rb_left;
>  		} else if (prio < p->priority) {
>  			parent = &rb->rb_right;
> -			first = false;
> +			*first = false;
>  		} else {
> -			list_add_tail(&pt->link, &p->requests);
> -			return false;
> +			*first = false;
> +			return p;
>  		}
>  	}
>  
> @@ -661,20 +656,10 @@ insert_request(struct intel_engine_cs *engine,
>  		p = &engine->default_priolist;
>  	} else {
>  		p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
> -		/* Convert an allocation failure to a priority bump */
> +
>  		if (unlikely(!p)) {
> -			prio = I915_PRIORITY_NORMAL; /* recurses just once */
> -
> -			/* To maintain ordering with all rendering, after an
> -			 * allocation failure we have to disable all scheduling.
> -			 * Requests will then be executed in fifo, and schedule
> -			 * will ensure that dependencies are emitted in fifo.
> -			 * There will be still some reordering with existing
> -			 * requests, so if userspace lied about their
> -			 * dependencies that reordering may be visible.
> -			 */
> -			engine->no_priolist = true;
> -			goto find_priolist;
> +			*first = false;
> +			return ERR_PTR(-ENOMEM);

You still have to handle the allocation failure on unsubmit, so keep the
goto here.
-Chris
diff mbox

Patch

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 1255724..c43ca1b 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -627,20 +627,15 @@  static void intel_lrc_irq_handler(unsigned long data)
 	intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
 }
 
-static bool
-insert_request(struct intel_engine_cs *engine,
-	       struct i915_priotree *pt,
-	       int prio)
+static struct i915_priolist *
+priolist_lookup(struct intel_engine_cs *engine, int prio, bool *first)
 {
 	struct i915_priolist *p;
 	struct rb_node **parent, *rb;
-	bool first = true;
 
 	if (unlikely(engine->no_priolist))
 		prio = I915_PRIORITY_NORMAL;
-
-find_priolist:
-	/* most positive priority is scheduled first, equal priorities fifo */
+	*first = true;
 	rb = NULL;
 	parent = &engine->execlist_queue.rb_node;
 	while (*parent) {
@@ -650,10 +645,10 @@  insert_request(struct intel_engine_cs *engine,
 			parent = &rb->rb_left;
 		} else if (prio < p->priority) {
 			parent = &rb->rb_right;
-			first = false;
+			*first = false;
 		} else {
-			list_add_tail(&pt->link, &p->requests);
-			return false;
+			*first = false;
+			return p;
 		}
 	}
 
@@ -661,20 +656,10 @@  insert_request(struct intel_engine_cs *engine,
 		p = &engine->default_priolist;
 	} else {
 		p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
-		/* Convert an allocation failure to a priority bump */
+
 		if (unlikely(!p)) {
-			prio = I915_PRIORITY_NORMAL; /* recurses just once */
-
-			/* To maintain ordering with all rendering, after an
-			 * allocation failure we have to disable all scheduling.
-			 * Requests will then be executed in fifo, and schedule
-			 * will ensure that dependencies are emitted in fifo.
-			 * There will be still some reordering with existing
-			 * requests, so if userspace lied about their
-			 * dependencies that reordering may be visible.
-			 */
-			engine->no_priolist = true;
-			goto find_priolist;
+			*first = false;
+			return ERR_PTR(-ENOMEM);
 		}
 	}
 
@@ -683,11 +668,39 @@  insert_request(struct intel_engine_cs *engine,
 	rb_insert_color(&p->node, &engine->execlist_queue);
 
 	INIT_LIST_HEAD(&p->requests);
-	list_add_tail(&pt->link, &p->requests);
 
-	if (first)
+	if (*first)
 		engine->execlist_first = &p->node;
 
+	return p;
+}
+
+static bool
+insert_request(struct intel_engine_cs *engine,
+	       struct i915_priotree *pt,
+	       int prio)
+{
+	struct i915_priolist *p;
+	bool first = false;
+
+	p = priolist_lookup(engine, prio, &first);
+
+	if (unlikely(IS_ERR(p))) {
+		/* To maintain ordering with all rendering, after an
+		 * allocation failure we have to disable all scheduling.
+		 * Requests will then be executed in fifo, and schedule
+		 * will ensure that dependencies are emitted in fifo.
+		 * There will be still some reordering with existing
+		 * requests, so if userspace lied about their
+		 * dependencies that reordering may be visible.
+		 */
+		engine->no_priolist = true;
+		p = priolist_lookup(engine, I915_PRIORITY_NORMAL, &first);
+	}
+
+	/* most positive priority is scheduled first, equal priorities fifo */
+	list_add_tail(&pt->link, &p->requests);
+
 	return first;
 }