Message ID | 20230308094106.203686-4-andi.shyti@linux.intel.com (mailing list archive)
---|---
State | New, archived
Series | Fix error propagation amongst request
On 3/8/2023 10:41 AM, Andi Shyti wrote:
> Make a version of the request creation, i915_request_create_locked(), that
> doesn't take any lock itself: the caller is expected to already hold the
> context timeline lock.
>
> Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
> Cc: stable@vger.kernel.org

Reviewed-by: Nirmoy Das <nirmoy.das@intel.com>

> ---
>  drivers/gpu/drm/i915/i915_request.c | 43 +++++++++++++++++++----------
>  drivers/gpu/drm/i915/i915_request.h |  2 ++
>  2 files changed, 31 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index 72aed544f8714..5ddb0e02b06b7 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -1028,18 +1028,11 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
>          return ERR_PTR(ret);
>  }
>
> -struct i915_request *
> -i915_request_create(struct intel_context *ce)
> +static struct i915_request *
> +__i915_request_create_locked(struct intel_context *ce)
>  {
>          struct i915_request *rq;
> -        struct intel_timeline *tl;
> -
> -        if (intel_context_throttle(ce))
> -                return ERR_PTR(-EINTR);
> -
> -        tl = intel_context_timeline_lock(ce);
> -        if (IS_ERR(tl))
> -                return ERR_CAST(tl);
> +        struct intel_timeline *tl = ce->timeline;
>
>          /* Move our oldest request to the slab-cache (if not in use!) */
>          rq = list_first_entry(&tl->requests, typeof(*rq), link);
> @@ -1049,16 +1042,38 @@ i915_request_create(struct intel_context *ce)
>          intel_context_enter(ce);
>          rq = __i915_request_create(ce, GFP_KERNEL);
>          intel_context_exit(ce); /* active reference transferred to request */
> -        if (IS_ERR(rq))
> -                goto err_unlock;
>
>          /* Check that we do not interrupt ourselves with a new request */
>          rq->cookie = lockdep_pin_lock(&tl->mutex);
>
>          return rq;
> +}
> +
> +struct i915_request *
> +i915_request_create_locked(struct intel_context *ce)
> +{
> +        intel_context_assert_timeline_is_locked(ce->timeline);
> +
> +        if (intel_context_throttle(ce))
> +                return ERR_PTR(-EINTR);
> +
> +        return __i915_request_create_locked(ce);
> +}
> +
> +struct i915_request *
> +i915_request_create(struct intel_context *ce)
> +{
> +        struct i915_request *rq;
> +        struct intel_timeline *tl;
> +
> +        tl = intel_context_timeline_lock(ce);
> +        if (IS_ERR(tl))
> +                return ERR_CAST(tl);
> +
> +        rq = __i915_request_create_locked(ce);
> +        if (IS_ERR(rq))
> +                intel_context_timeline_unlock(tl);
>
> -err_unlock:
> -        intel_context_timeline_unlock(tl);
>          return rq;
>  }
>
> diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
> index f5e1bb5e857aa..bb48bd4605c03 100644
> --- a/drivers/gpu/drm/i915/i915_request.h
> +++ b/drivers/gpu/drm/i915/i915_request.h
> @@ -374,6 +374,8 @@ struct i915_request * __must_check
>  __i915_request_create(struct intel_context *ce, gfp_t gfp);
>  struct i915_request * __must_check
>  i915_request_create(struct intel_context *ce);
> +struct i915_request * __must_check
> +i915_request_create_locked(struct intel_context *ce);
>
>  void __i915_request_skip(struct i915_request *rq);
>  bool i915_request_set_error_once(struct i915_request *rq, int error);
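Usage note on the new entry point (not part of the patch): a path that has already taken the context timeline mutex must not call i915_request_create(), which would try to take that mutex a second time; it should call i915_request_create_locked() and unlock on failure itself. A minimal caller sketch, assuming the i915 internal headers below and a made-up helper name queue_nop_request():

/*
 * Illustrative only -- not from the patch. Assumes the new
 * i915_request_create_locked() from this series.
 */
#include "gt/intel_context.h"
#include "i915_request.h"

static int queue_nop_request(struct intel_context *ce)
{
        struct intel_timeline *tl;
        struct i915_request *rq;

        /* The caller takes the timeline lock itself... */
        tl = intel_context_timeline_lock(ce);
        if (IS_ERR(tl))
                return PTR_ERR(tl);

        /* ...so it must use the _locked variant, which only asserts the lock. */
        rq = i915_request_create_locked(ce);
        if (IS_ERR(rq)) {
                intel_context_timeline_unlock(tl);
                return PTR_ERR(rq);
        }

        /* Emit commands into rq here. */

        /* i915_request_add() queues the request and drops the timeline lock. */
        i915_request_add(rq);
        return 0;
}

Existing callers of i915_request_create() are unaffected; only code that already owns the timeline mutex needs the _locked variant.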
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 72aed544f8714..5ddb0e02b06b7 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1028,18 +1028,11 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
         return ERR_PTR(ret);
 }

-struct i915_request *
-i915_request_create(struct intel_context *ce)
+static struct i915_request *
+__i915_request_create_locked(struct intel_context *ce)
 {
         struct i915_request *rq;
-        struct intel_timeline *tl;
-
-        if (intel_context_throttle(ce))
-                return ERR_PTR(-EINTR);
-
-        tl = intel_context_timeline_lock(ce);
-        if (IS_ERR(tl))
-                return ERR_CAST(tl);
+        struct intel_timeline *tl = ce->timeline;

         /* Move our oldest request to the slab-cache (if not in use!) */
         rq = list_first_entry(&tl->requests, typeof(*rq), link);
@@ -1049,16 +1042,38 @@ i915_request_create(struct intel_context *ce)
         intel_context_enter(ce);
         rq = __i915_request_create(ce, GFP_KERNEL);
         intel_context_exit(ce); /* active reference transferred to request */
-        if (IS_ERR(rq))
-                goto err_unlock;

         /* Check that we do not interrupt ourselves with a new request */
         rq->cookie = lockdep_pin_lock(&tl->mutex);

         return rq;
+}
+
+struct i915_request *
+i915_request_create_locked(struct intel_context *ce)
+{
+        intel_context_assert_timeline_is_locked(ce->timeline);
+
+        if (intel_context_throttle(ce))
+                return ERR_PTR(-EINTR);
+
+        return __i915_request_create_locked(ce);
+}
+
+struct i915_request *
+i915_request_create(struct intel_context *ce)
+{
+        struct i915_request *rq;
+        struct intel_timeline *tl;
+
+        tl = intel_context_timeline_lock(ce);
+        if (IS_ERR(tl))
+                return ERR_CAST(tl);
+
+        rq = __i915_request_create_locked(ce);
+        if (IS_ERR(rq))
+                intel_context_timeline_unlock(tl);

-err_unlock:
-        intel_context_timeline_unlock(tl);
         return rq;
 }

diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index f5e1bb5e857aa..bb48bd4605c03 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -374,6 +374,8 @@ struct i915_request * __must_check
 __i915_request_create(struct intel_context *ce, gfp_t gfp);
 struct i915_request * __must_check
 i915_request_create(struct intel_context *ce);
+struct i915_request * __must_check
+i915_request_create_locked(struct intel_context *ce);

 void __i915_request_skip(struct i915_request *rq);
 bool i915_request_set_error_once(struct i915_request *rq, int error);
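A side note on the rq->cookie assignment that both paths keep: lockdep_pin_lock()/lockdep_unpin_lock() are the generic lockdep helpers for pinning a held lock so that lockdep complains if anything drops it before the matching unpin. A stripped-down, self-contained sketch of the pattern (illustrative only, not i915 code; example_mutex is made up):

#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);

static void pinned_section(void)
{
        struct pin_cookie cookie;

        mutex_lock(&example_mutex);

        /* Record that the mutex must stay held from here on... */
        cookie = lockdep_pin_lock(&example_mutex);

        /* ...do work that relies on the mutex not being dropped... */

        /* ...then release the pin before finally unlocking. */
        lockdep_unpin_lock(&example_mutex, cookie);
        mutex_unlock(&example_mutex);
}

In the request code the pin taken at creation is expected to be released later with the stored rq->cookie, once the request is queued and the timeline mutex is finally dropped.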
Make a version of the request creation, i915_request_create_locked(), that
doesn't take any lock itself: the caller is expected to already hold the
context timeline lock.

Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Cc: stable@vger.kernel.org
---
 drivers/gpu/drm/i915/i915_request.c | 43 +++++++++++++++++++----------
 drivers/gpu/drm/i915/i915_request.h |  2 ++
 2 files changed, 31 insertions(+), 14 deletions(-)
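One assumption worth spelling out: intel_context_assert_timeline_is_locked() is not defined in this patch; it is introduced elsewhere in the series. Conceptually it should reduce to a lockdep assertion on the timeline mutex, roughly like the hypothetical stand-in below (an assumption, not the series' actual code):

#include <linux/lockdep.h>

#include "gt/intel_timeline.h"

/* Hypothetical stand-in for the helper called by i915_request_create_locked();
 * the real definition lives elsewhere in this series. */
static inline void
intel_context_assert_timeline_is_locked(struct intel_timeline *tl)
{
        lockdep_assert_held(&tl->mutex);
}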