Message ID: 20220225000623.1934438-2-John.C.Harrison@Intel.com (mailing list archive)
State: New, archived
Series: Prep work for next GuC release
On Thu, 24 Feb 2022, John.C.Harrison@Intel.com wrote:
> From: John Harrison <John.C.Harrison@Intel.com>
>
> The LRC descriptor pool is going away. So, stop using it as a check for
> context registration, use the GuC id instead (being the thing that
> actually gets registered with the GuC).
>
> Also, rename the set/clear/query helper functions for context id
> mappings to better reflect their purpose and to differentiate from
> other registration related helper functions.
>
> Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
> Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
> ---
>  .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 69 ++++++++++---------
>  1 file changed, 38 insertions(+), 31 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> index b3a429a92c0d..7fb889e14995 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> @@ -514,31 +514,20 @@ static inline bool guc_submission_initialized(struct intel_guc *guc)
>          return !!guc->lrc_desc_pool_vaddr;
>  }
>
> -static inline void reset_lrc_desc(struct intel_guc *guc, u32 id)
> +static inline void _reset_lrc_desc(struct intel_guc *guc, u32 id)
>  {
> -        if (likely(guc_submission_initialized(guc))) {
> -                struct guc_lrc_desc *desc = __get_lrc_desc(guc, id);
> -                unsigned long flags;
> -
> -                memset(desc, 0, sizeof(*desc));
> +        struct guc_lrc_desc *desc = __get_lrc_desc(guc, id);
>
> -                /*
> -                 * xarray API doesn't have xa_erase_irqsave wrapper, so calling
> -                 * the lower level functions directly.
> -                 */
> -                xa_lock_irqsave(&guc->context_lookup, flags);
> -                __xa_erase(&guc->context_lookup, id);
> -                xa_unlock_irqrestore(&guc->context_lookup, flags);
> -        }
> +        memset(desc, 0, sizeof(*desc));
>  }
>
> -static inline bool lrc_desc_registered(struct intel_guc *guc, u32 id)
> +static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id)
>  {
>          return __get_context(guc, id);
>  }
>
> -static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id,
> -                                           struct intel_context *ce)
> +static inline void set_ctx_id_mapping(struct intel_guc *guc, u32 id,
> +                                      struct intel_context *ce)
>  {
>          unsigned long flags;
>
> @@ -551,6 +540,24 @@ static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id,
>          xa_unlock_irqrestore(&guc->context_lookup, flags);
>  }
>
> +static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id)
> +{
> +        unsigned long flags;
> +
> +        if (unlikely(!guc_submission_initialized(guc)))
> +                return;
> +
> +        _reset_lrc_desc(guc, id);
> +
> +        /*
> +         * xarray API doesn't have xa_erase_irqsave wrapper, so calling
> +         * the lower level functions directly.
> +         */
> +        xa_lock_irqsave(&guc->context_lookup, flags);
> +        __xa_erase(&guc->context_lookup, id);
> +        xa_unlock_irqrestore(&guc->context_lookup, flags);
> +}

There are a plethora of static inlines in the guc .c files, and this
adds more. How about just letting the compiler decide what's the best
course of action, inline or not? I think hand rolling the inline is a
micro optimization that you'd need to justify i.e. show that you're
doing a better job than the compiler.

The main downsides to using inlines are that you'll miss compiler
warnings for unused functions, and it sets an example for people to
start using inline more, while they should be an exception.

BR,
Jani.

PS. I also don't much like the likely/unlikely annotations, but that's
another can of worms.

> +
>  static void decr_outstanding_submission_g2h(struct intel_guc *guc)
>  {
>          if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
> @@ -795,7 +802,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
>          GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
>          GEM_BUG_ON(context_guc_id_invalid(ce));
>          GEM_BUG_ON(context_wait_for_deregister_to_register(ce));
> -        GEM_BUG_ON(!lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id));
> +        GEM_BUG_ON(!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id));
>
>          /* Insert NOOP if this work queue item will wrap the tail pointer. */
>          if (wqi_size > wq_space_until_wrap(ce)) {
> @@ -923,7 +930,7 @@ static int guc_dequeue_one_context(struct intel_guc *guc)
>          if (submit) {
>                  struct intel_context *ce = request_to_scheduling_context(last);
>
> -                if (unlikely(!lrc_desc_registered(guc, ce->guc_id.id) &&
> +                if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) &&
>                               !intel_context_is_banned(ce))) {
>                          ret = guc_lrc_desc_pin(ce, false);
>                          if (unlikely(ret == -EPIPE)) {
> @@ -1897,7 +1904,7 @@ static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
>
>          return submission_disabled(guc) || guc->stalled_request ||
>                 !i915_sched_engine_is_empty(sched_engine) ||
> -               !lrc_desc_registered(guc, ce->guc_id.id);
> +               !ctx_id_mapped(guc, ce->guc_id.id);
>  }
>
>  static void guc_submit_request(struct i915_request *rq)
> @@ -1954,7 +1961,7 @@ static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
>                  else
>                          ida_simple_remove(&guc->submission_state.guc_ids,
>                                            ce->guc_id.id);
> -                reset_lrc_desc(guc, ce->guc_id.id);
> +                clr_ctx_id_mapping(guc, ce->guc_id.id);
>                  set_context_guc_id_invalid(ce);
>          }
>          if (!list_empty(&ce->guc_id.link))
> @@ -2250,10 +2257,10 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
>          GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
>                     i915_gem_object_is_lmem(ce->ring->vma->obj));
>
> -        context_registered = lrc_desc_registered(guc, desc_idx);
> +        context_registered = ctx_id_mapped(guc, desc_idx);
>
> -        reset_lrc_desc(guc, desc_idx);
> -        set_lrc_desc_registered(guc, desc_idx, ce);
> +        clr_ctx_id_mapping(guc, desc_idx);
> +        set_ctx_id_mapping(guc, desc_idx, ce);
>
>          desc = __get_lrc_desc(guc, desc_idx);
>          desc->engine_class = engine_class_to_guc_class(engine->class);
> @@ -2324,7 +2331,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
>          }
>          spin_unlock_irqrestore(&ce->guc_state.lock, flags);
>          if (unlikely(disabled)) {
> -                reset_lrc_desc(guc, desc_idx);
> +                clr_ctx_id_mapping(guc, desc_idx);
>                  return 0;        /* Will get registered later */
>          }
>
> @@ -2340,9 +2347,9 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
>                  with_intel_runtime_pm(runtime_pm, wakeref)
>                          ret = register_context(ce, loop);
>                  if (unlikely(ret == -EBUSY)) {
> -                        reset_lrc_desc(guc, desc_idx);
> +                        clr_ctx_id_mapping(guc, desc_idx);
>                  } else if (unlikely(ret == -ENODEV)) {
> -                        reset_lrc_desc(guc, desc_idx);
> +                        clr_ctx_id_mapping(guc, desc_idx);
>                          ret = 0;        /* Will get registered later */
>                  }
>          }
> @@ -2529,7 +2536,7 @@ static bool context_cant_unblock(struct intel_context *ce)
>
>          return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) ||
>                  context_guc_id_invalid(ce) ||
> -                !lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id) ||
> +                !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id) ||
>                  !intel_context_is_pinned(ce);
>  }
>
> @@ -2699,7 +2706,7 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
>          bool disabled;
>
>          GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
> -        GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id.id));
> +        GEM_BUG_ON(!ctx_id_mapped(guc, ce->guc_id.id));
>          GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
>          GEM_BUG_ON(context_enabled(ce));
>
> @@ -2816,7 +2823,7 @@ static void guc_context_destroy(struct kref *kref)
>           */
>          spin_lock_irqsave(&guc->submission_state.lock, flags);
>          destroy = submission_disabled(guc) || context_guc_id_invalid(ce) ||
> -                  !lrc_desc_registered(guc, ce->guc_id.id);
> +                  !ctx_id_mapped(guc, ce->guc_id.id);
>          if (likely(!destroy)) {
>                  if (!list_empty(&ce->guc_id.link))
>                          list_del_init(&ce->guc_id.link);
> @@ -3059,7 +3066,7 @@ static void guc_signal_context_fence(struct intel_context *ce)
>  static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
>  {
>          return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) ||
> -                !lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id)) &&
> +                !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id)) &&
>                  !submission_disabled(ce_to_guc(ce));
>  }
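To make the missed-warning point concrete, here is a minimal standalone
sketch (not from the thread; the helper names are hypothetical, not
driver code), assuming GCC or clang with -Wall:

/* Compile with: gcc -O2 -Wall -c inline_warn.c */
#include <stdbool.h>
#include <stddef.h>

/* Plain static and never called: gcc -Wall reports
 * "warning: 'is_mapped' defined but not used". */
static bool is_mapped(const void *slot)
{
        return slot != NULL;
}

/* Equally dead, but marked static inline: the compiler stays silent,
 * so the unused helper lingers in the .c file unnoticed. At -O2 the
 * compiler would have inlined the plain static version on its own
 * anyway, which is the "let the compiler decide" argument above. */
static inline bool is_mapped_inline(const void *slot)
{
        return slot != NULL;
}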
On 3/4/2022 03:59, Jani Nikula wrote:
> On Thu, 24 Feb 2022, John.C.Harrison@Intel.com wrote:
>> From: John Harrison <John.C.Harrison@Intel.com>
>>
>> The LRC descriptor pool is going away. So, stop using it as a check for
>> context registration, use the GuC id instead (being the thing that
>> actually gets registered with the GuC).
>>
>> Also, rename the set/clear/query helper functions for context id
>> mappings to better reflect their purpose and to differentiate from
>> other registration related helper functions.
>>
>> Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
>> Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
>> ---
>>  .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 69 ++++++++++---------
>>  1 file changed, 38 insertions(+), 31 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
>> index b3a429a92c0d..7fb889e14995 100644
>> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
>> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
>> @@ -514,31 +514,20 @@ static inline bool guc_submission_initialized(struct intel_guc *guc)
>>          return !!guc->lrc_desc_pool_vaddr;
>>  }
>>
>> -static inline void reset_lrc_desc(struct intel_guc *guc, u32 id)
>> +static inline void _reset_lrc_desc(struct intel_guc *guc, u32 id)
>>  {
>> -        if (likely(guc_submission_initialized(guc))) {
>> -                struct guc_lrc_desc *desc = __get_lrc_desc(guc, id);
>> -                unsigned long flags;
>> -
>> -                memset(desc, 0, sizeof(*desc));
>> +        struct guc_lrc_desc *desc = __get_lrc_desc(guc, id);
>>
>> -                /*
>> -                 * xarray API doesn't have xa_erase_irqsave wrapper, so calling
>> -                 * the lower level functions directly.
>> -                 */
>> -                xa_lock_irqsave(&guc->context_lookup, flags);
>> -                __xa_erase(&guc->context_lookup, id);
>> -                xa_unlock_irqrestore(&guc->context_lookup, flags);
>> -        }
>> +        memset(desc, 0, sizeof(*desc));
>>  }
>>
>> -static inline bool lrc_desc_registered(struct intel_guc *guc, u32 id)
>> +static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id)
>>  {
>>          return __get_context(guc, id);
>>  }
>>
>> -static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id,
>> -                                           struct intel_context *ce)
>> +static inline void set_ctx_id_mapping(struct intel_guc *guc, u32 id,
>> +                                      struct intel_context *ce)
>>  {
>>          unsigned long flags;
>>
>> @@ -551,6 +540,24 @@ static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id,
>>          xa_unlock_irqrestore(&guc->context_lookup, flags);
>>  }
>>
>> +static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id)
>> +{
>> +        unsigned long flags;
>> +
>> +        if (unlikely(!guc_submission_initialized(guc)))
>> +                return;
>> +
>> +        _reset_lrc_desc(guc, id);
>> +
>> +        /*
>> +         * xarray API doesn't have xa_erase_irqsave wrapper, so calling
>> +         * the lower level functions directly.
>> +         */
>> +        xa_lock_irqsave(&guc->context_lookup, flags);
>> +        __xa_erase(&guc->context_lookup, id);
>> +        xa_unlock_irqrestore(&guc->context_lookup, flags);
>> +}
> There are a plethora of static inlines in the guc .c files, and this
> adds more. How about just letting the compiler decide what's the best
> course of action, inline or not? I think hand rolling the inline is a
> micro optimization that you'd need to justify i.e. show that you're
> doing a better job than the compiler.
>
> The main downsides to using inlines are that you'll miss compiler
> warnings for unused functions, and it sets an example for people to
> start using inline more, while they should be an exception.
>
> BR,
> Jani.
>
>
> PS. I also don't much like the likely/unlikely annotations, but that's
> another can of worms.

Technically, this patch isn't adding any new ones. It is just reworking
existing functions in their existing style. So it basically comes under
your last point of people just following the prevailing style because
it's already there.

I can add a task to the clean-up backlog to remove all mention of
inline. Not sure why you think the (un)likely tags are bad? Again, I
have no particular view either way.

John.

>
>
>> +
>>  static void decr_outstanding_submission_g2h(struct intel_guc *guc)
>>  {
>>          if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
>> @@ -795,7 +802,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
>>          GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
>>          GEM_BUG_ON(context_guc_id_invalid(ce));
>>          GEM_BUG_ON(context_wait_for_deregister_to_register(ce));
>> -        GEM_BUG_ON(!lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id));
>> +        GEM_BUG_ON(!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id));
>>
>>          /* Insert NOOP if this work queue item will wrap the tail pointer. */
>>          if (wqi_size > wq_space_until_wrap(ce)) {
>> @@ -923,7 +930,7 @@ static int guc_dequeue_one_context(struct intel_guc *guc)
>>          if (submit) {
>>                  struct intel_context *ce = request_to_scheduling_context(last);
>>
>> -                if (unlikely(!lrc_desc_registered(guc, ce->guc_id.id) &&
>> +                if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) &&
>>                               !intel_context_is_banned(ce))) {
>>                          ret = guc_lrc_desc_pin(ce, false);
>>                          if (unlikely(ret == -EPIPE)) {
>> @@ -1897,7 +1904,7 @@ static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
>>
>>          return submission_disabled(guc) || guc->stalled_request ||
>>                 !i915_sched_engine_is_empty(sched_engine) ||
>> -               !lrc_desc_registered(guc, ce->guc_id.id);
>> +               !ctx_id_mapped(guc, ce->guc_id.id);
>>  }
>>
>>  static void guc_submit_request(struct i915_request *rq)
>> @@ -1954,7 +1961,7 @@ static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
>>                  else
>>                          ida_simple_remove(&guc->submission_state.guc_ids,
>>                                            ce->guc_id.id);
>> -                reset_lrc_desc(guc, ce->guc_id.id);
>> +                clr_ctx_id_mapping(guc, ce->guc_id.id);
>>                  set_context_guc_id_invalid(ce);
>>          }
>>          if (!list_empty(&ce->guc_id.link))
>> @@ -2250,10 +2257,10 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
>>          GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
>>                     i915_gem_object_is_lmem(ce->ring->vma->obj));
>>
>> -        context_registered = lrc_desc_registered(guc, desc_idx);
>> +        context_registered = ctx_id_mapped(guc, desc_idx);
>>
>> -        reset_lrc_desc(guc, desc_idx);
>> -        set_lrc_desc_registered(guc, desc_idx, ce);
>> +        clr_ctx_id_mapping(guc, desc_idx);
>> +        set_ctx_id_mapping(guc, desc_idx, ce);
>>
>>          desc = __get_lrc_desc(guc, desc_idx);
>>          desc->engine_class = engine_class_to_guc_class(engine->class);
>> @@ -2324,7 +2331,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
>>          }
>>          spin_unlock_irqrestore(&ce->guc_state.lock, flags);
>>          if (unlikely(disabled)) {
>> -                reset_lrc_desc(guc, desc_idx);
>> +                clr_ctx_id_mapping(guc, desc_idx);
>>                  return 0;        /* Will get registered later */
>>          }
>>
>> @@ -2340,9 +2347,9 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
>>                  with_intel_runtime_pm(runtime_pm, wakeref)
>>                          ret = register_context(ce, loop);
>>                  if (unlikely(ret == -EBUSY)) {
>> -                        reset_lrc_desc(guc, desc_idx);
>> +                        clr_ctx_id_mapping(guc, desc_idx);
>>                  } else if (unlikely(ret == -ENODEV)) {
>> -                        reset_lrc_desc(guc, desc_idx);
>> +                        clr_ctx_id_mapping(guc, desc_idx);
>>                          ret = 0;        /* Will get registered later */
>>                  }
>>          }
>> @@ -2529,7 +2536,7 @@ static bool context_cant_unblock(struct intel_context *ce)
>>
>>          return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) ||
>>                  context_guc_id_invalid(ce) ||
>> -                !lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id) ||
>> +                !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id) ||
>>                  !intel_context_is_pinned(ce);
>>  }
>>
>> @@ -2699,7 +2706,7 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
>>          bool disabled;
>>
>>          GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
>> -        GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id.id));
>> +        GEM_BUG_ON(!ctx_id_mapped(guc, ce->guc_id.id));
>>          GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
>>          GEM_BUG_ON(context_enabled(ce));
>>
>> @@ -2816,7 +2823,7 @@ static void guc_context_destroy(struct kref *kref)
>>           */
>>          spin_lock_irqsave(&guc->submission_state.lock, flags);
>>          destroy = submission_disabled(guc) || context_guc_id_invalid(ce) ||
>> -                  !lrc_desc_registered(guc, ce->guc_id.id);
>> +                  !ctx_id_mapped(guc, ce->guc_id.id);
>>          if (likely(!destroy)) {
>>                  if (!list_empty(&ce->guc_id.link))
>>                          list_del_init(&ce->guc_id.link);
>> @@ -3059,7 +3066,7 @@ static void guc_signal_context_fence(struct intel_context *ce)
>>  static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
>>  {
>>          return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) ||
>> -                !lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id)) &&
>> +                !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id)) &&
>>                  !submission_disabled(ce_to_guc(ce));
>>  }
On Fri, 04 Mar 2022, John Harrison <john.c.harrison@intel.com> wrote:
> On 3/4/2022 03:59, Jani Nikula wrote:
>> On Thu, 24 Feb 2022, John.C.Harrison@Intel.com wrote:
>> There are a plethora of static inlines in the guc .c files, and this
>> adds more. How about just letting the compiler decide what's the best
>> course of action, inline or not? I think hand rolling the inline is a
>> micro optimization that you'd need to justify i.e. show that you're
>> doing a better job than the compiler.
>>
>> The main downsides to using inlines are that you'll miss compiler
>> warnings for unused functions, and it sets an example for people to
>> start using inline more, while they should be an exception.
>>
>> BR,
>> Jani.
>>
>>
>> PS. I also don't much like the likely/unlikely annotations, but that's
>> another can of worms.
> Technically, this patch isn't adding any new ones. It is just reworking
> existing functions in their existing style. So it basically comes under
> your last point of people just following the prevailing style because
> it's already there.
>
> I can add a task to the clean-up backlog to remove all mention of
> inline. Not sure why you think the (un)likely tags are bad? Again, I
> have no particular view either way.

The (un)likely annotations are similar to static inlines in that
they're often unjustified micro optimizations. Having plenty of them
gives people the false impression using them should be the rule rather
than the exception. And getting them wrong could have a high
performance penalty. They're certainly not meant for regular error
handling.

Similar to static inlines, (un)likely have their uses, but they need to
be used sparingly and the use needs to be justified. For static
inlines, especially within .c files, just let the compiler do its job.
For (un)likely, just let the CPU branch predictor do its job.

The link's pretty old, but see also:

https://lwn.net/Articles/420019/

BR,
Jani.
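For reference, the kernel's annotations are essentially thin wrappers
around a compiler hint (include/linux/compiler.h). Below is a
standalone sketch of a justified versus an unjustified use; the
functions are hypothetical illustrations, not driver code:

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

#include <stdlib.h>

/* Arguably justified: allocation failure really is the rare path,
 * and the hint keeps the common case as straight-line code. */
int fill_buffer(size_t n)
{
        char *buf = malloc(n);

        if (unlikely(!buf))
                return -12;     /* -ENOMEM */

        /* ... hot common path ... */
        free(buf);
        return 0;
}

/* Unjustified: "busy" may well be the common case, so this is the
 * regular error handling the hints are not meant for. A wrong guess
 * pessimizes a path the CPU branch predictor would have handled fine
 * on its own. */
int try_claim(int busy)
{
        if (unlikely(busy))
                return -16;     /* -EBUSY */
        return 0;
}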
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index b3a429a92c0d..7fb889e14995 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -514,31 +514,20 @@ static inline bool guc_submission_initialized(struct intel_guc *guc)
         return !!guc->lrc_desc_pool_vaddr;
 }
 
-static inline void reset_lrc_desc(struct intel_guc *guc, u32 id)
+static inline void _reset_lrc_desc(struct intel_guc *guc, u32 id)
 {
-        if (likely(guc_submission_initialized(guc))) {
-                struct guc_lrc_desc *desc = __get_lrc_desc(guc, id);
-                unsigned long flags;
-
-                memset(desc, 0, sizeof(*desc));
+        struct guc_lrc_desc *desc = __get_lrc_desc(guc, id);
 
-                /*
-                 * xarray API doesn't have xa_erase_irqsave wrapper, so calling
-                 * the lower level functions directly.
-                 */
-                xa_lock_irqsave(&guc->context_lookup, flags);
-                __xa_erase(&guc->context_lookup, id);
-                xa_unlock_irqrestore(&guc->context_lookup, flags);
-        }
+        memset(desc, 0, sizeof(*desc));
 }
 
-static inline bool lrc_desc_registered(struct intel_guc *guc, u32 id)
+static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id)
 {
         return __get_context(guc, id);
 }
 
-static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id,
-                                           struct intel_context *ce)
+static inline void set_ctx_id_mapping(struct intel_guc *guc, u32 id,
+                                      struct intel_context *ce)
 {
         unsigned long flags;
 
@@ -551,6 +540,24 @@ static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id,
         xa_unlock_irqrestore(&guc->context_lookup, flags);
 }
 
+static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id)
+{
+        unsigned long flags;
+
+        if (unlikely(!guc_submission_initialized(guc)))
+                return;
+
+        _reset_lrc_desc(guc, id);
+
+        /*
+         * xarray API doesn't have xa_erase_irqsave wrapper, so calling
+         * the lower level functions directly.
+         */
+        xa_lock_irqsave(&guc->context_lookup, flags);
+        __xa_erase(&guc->context_lookup, id);
+        xa_unlock_irqrestore(&guc->context_lookup, flags);
+}
+
 static void decr_outstanding_submission_g2h(struct intel_guc *guc)
 {
         if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
@@ -795,7 +802,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
         GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
         GEM_BUG_ON(context_guc_id_invalid(ce));
         GEM_BUG_ON(context_wait_for_deregister_to_register(ce));
-        GEM_BUG_ON(!lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id));
+        GEM_BUG_ON(!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id));
 
         /* Insert NOOP if this work queue item will wrap the tail pointer. */
         if (wqi_size > wq_space_until_wrap(ce)) {
@@ -923,7 +930,7 @@ static int guc_dequeue_one_context(struct intel_guc *guc)
         if (submit) {
                 struct intel_context *ce = request_to_scheduling_context(last);
 
-                if (unlikely(!lrc_desc_registered(guc, ce->guc_id.id) &&
+                if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) &&
                              !intel_context_is_banned(ce))) {
                         ret = guc_lrc_desc_pin(ce, false);
                         if (unlikely(ret == -EPIPE)) {
@@ -1897,7 +1904,7 @@ static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
 
         return submission_disabled(guc) || guc->stalled_request ||
                !i915_sched_engine_is_empty(sched_engine) ||
-               !lrc_desc_registered(guc, ce->guc_id.id);
+               !ctx_id_mapped(guc, ce->guc_id.id);
 }
 
 static void guc_submit_request(struct i915_request *rq)
@@ -1954,7 +1961,7 @@ static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
                 else
                         ida_simple_remove(&guc->submission_state.guc_ids,
                                           ce->guc_id.id);
-                reset_lrc_desc(guc, ce->guc_id.id);
+                clr_ctx_id_mapping(guc, ce->guc_id.id);
                 set_context_guc_id_invalid(ce);
         }
         if (!list_empty(&ce->guc_id.link))
@@ -2250,10 +2257,10 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
         GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
                    i915_gem_object_is_lmem(ce->ring->vma->obj));
 
-        context_registered = lrc_desc_registered(guc, desc_idx);
+        context_registered = ctx_id_mapped(guc, desc_idx);
 
-        reset_lrc_desc(guc, desc_idx);
-        set_lrc_desc_registered(guc, desc_idx, ce);
+        clr_ctx_id_mapping(guc, desc_idx);
+        set_ctx_id_mapping(guc, desc_idx, ce);
 
         desc = __get_lrc_desc(guc, desc_idx);
         desc->engine_class = engine_class_to_guc_class(engine->class);
@@ -2324,7 +2331,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
         }
         spin_unlock_irqrestore(&ce->guc_state.lock, flags);
         if (unlikely(disabled)) {
-                reset_lrc_desc(guc, desc_idx);
+                clr_ctx_id_mapping(guc, desc_idx);
                 return 0;        /* Will get registered later */
         }
 
@@ -2340,9 +2347,9 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
                 with_intel_runtime_pm(runtime_pm, wakeref)
                         ret = register_context(ce, loop);
                 if (unlikely(ret == -EBUSY)) {
-                        reset_lrc_desc(guc, desc_idx);
+                        clr_ctx_id_mapping(guc, desc_idx);
                 } else if (unlikely(ret == -ENODEV)) {
-                        reset_lrc_desc(guc, desc_idx);
+                        clr_ctx_id_mapping(guc, desc_idx);
                         ret = 0;        /* Will get registered later */
                 }
         }
@@ -2529,7 +2536,7 @@ static bool context_cant_unblock(struct intel_context *ce)
 
         return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) ||
                 context_guc_id_invalid(ce) ||
-                !lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id) ||
+                !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id) ||
                 !intel_context_is_pinned(ce);
 }
 
@@ -2699,7 +2706,7 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
         bool disabled;
 
         GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
-        GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id.id));
+        GEM_BUG_ON(!ctx_id_mapped(guc, ce->guc_id.id));
         GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
         GEM_BUG_ON(context_enabled(ce));
 
@@ -2816,7 +2823,7 @@ static void guc_context_destroy(struct kref *kref)
          */
         spin_lock_irqsave(&guc->submission_state.lock, flags);
         destroy = submission_disabled(guc) || context_guc_id_invalid(ce) ||
-                  !lrc_desc_registered(guc, ce->guc_id.id);
+                  !ctx_id_mapped(guc, ce->guc_id.id);
         if (likely(!destroy)) {
                 if (!list_empty(&ce->guc_id.link))
                         list_del_init(&ce->guc_id.link);
@@ -3059,7 +3066,7 @@ static void guc_signal_context_fence(struct intel_context *ce)
 static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
 {
         return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) ||
-                !lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id)) &&
+                !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id)) &&
                 !submission_disabled(ce_to_guc(ce));
 }
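For illustration only, not part of the applied patch: if the review
feedback above were folded in, the renamed helpers might drop both the
hand-rolled inline and the unlikely() hint and look something like the
sketch below, leaving inlining to the compiler and branch layout to the
CPU predictor.

static bool ctx_id_mapped(struct intel_guc *guc, u32 id)
{
        return __get_context(guc, id);
}

static void clr_ctx_id_mapping(struct intel_guc *guc, u32 id)
{
        unsigned long flags;

        /* No hint: submission being uninitialized is ordinary flow. */
        if (!guc_submission_initialized(guc))
                return;

        _reset_lrc_desc(guc, id);

        /*
         * xarray API doesn't have xa_erase_irqsave wrapper, so calling
         * the lower level functions directly.
         */
        xa_lock_irqsave(&guc->context_lookup, flags);
        __xa_erase(&guc->context_lookup, id);
        xa_unlock_irqrestore(&guc->context_lookup, flags);
}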