Message ID: 1441367955-22006-2-git-send-email-michel.thierry@intel.com (mailing list archive)
State: New, archived
On 04/09/15 12:59, Michel Thierry wrote:
> When WaEnableForceRestoreInCtxtDescForVCS is required, it is only
> safe to send new contexts if the last reported event is "active to
> idle". Otherwise the same context can fully preempt itself because
> lite-restore is disabled.
>
> Testcase: igt/gem_concurrent_blit
> Reported-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
> Signed-off-by: Michel Thierry <michel.thierry@intel.com>
> ---

The lock-up I was seeing goes away with these 2 patches applied, so:

Tested-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>

> [snip]
On 04/09/2015 12:59, Michel Thierry wrote:
> When WaEnableForceRestoreInCtxtDescForVCS is required, it is only
> safe to send new contexts if the last reported event is "active to
> idle". Otherwise the same context can fully preempt itself because
> lite-restore is disabled.
>
> Testcase: igt/gem_concurrent_blit
> Reported-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
> Signed-off-by: Michel Thierry <michel.thierry@intel.com>
> ---
> [snip]

This needs changes if we decide to drop SKL; otherwise it looks good to me.

Reviewed-by: Arun Siluvery <arun.siluvery@linux.intel.com>

regards
Arun
On Thu, Sep 10, 2015 at 06:37:17PM +0100, Arun Siluvery wrote:
> On 04/09/2015 12:59, Michel Thierry wrote:
> > When WaEnableForceRestoreInCtxtDescForVCS is required, it is only
> > safe to send new contexts if the last reported event is "active to
> > idle". Otherwise the same context can fully preempt itself because
> > lite-restore is disabled.
> [snip]
>
> This needs changes if we decide to drop SKL; otherwise it looks good to me.
> Reviewed-by: Arun Siluvery <arun.siluvery@linux.intel.com>

Both patches applied to dinq, thanks.
-Daniel
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d8b605f..c3fca4b 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -277,10 +277,18 @@ u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
 	return lrca >> 12;
 }
 
+static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+
+	return ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
+		(IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) &&
+	       (ring->id == VCS || ring->id == VCS2);
+}
+
 uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
 				     struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = ring->dev;
 	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
 	uint64_t desc;
 	uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
@@ -302,9 +310,7 @@ uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
 
 	/* WaEnableForceRestoreInCtxtDescForVCS:skl */
 	/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
-	if (((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
-	    (IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) &&
-	    (ring->id == VCS || ring->id == VCS2))
+	if (disable_lite_restore_wa(ring))
 		desc |= GEN8_CTX_FORCE_RESTORE;
 
 	return desc;
@@ -495,7 +501,7 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
 	u32 status_pointer;
 	u8 read_pointer;
 	u8 write_pointer;
-	u32 status;
+	u32 status = 0;
 	u32 status_id;
 	u32 submit_contexts = 0;
 
@@ -533,8 +539,14 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
 		}
 	}
 
-	if (submit_contexts != 0)
+	if (disable_lite_restore_wa(ring)) {
+		/* Prevent a ctx to preempt itself */
+		if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
+		    (submit_contexts != 0))
+			execlists_context_unqueue(ring);
+	} else if (submit_contexts != 0) {
 		execlists_context_unqueue(ring);
+	}
 
 	spin_unlock(&ring->execlist_lock);
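To see the new gate in isolation: below is a simplified, self-contained sketch of the decision the interrupt handler makes at the end of the diff. It is not the kernel code; the bit value, the fake_ring type and the stub execlists_context_unqueue() are stand-ins so it builds and runs on its own.

/*
 * Simplified illustration of the submission gate above -- not the
 * kernel code. Names and values below are stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)	/* stand-in for the real bit */

struct fake_ring {
	const char *name;
	bool lite_restore_wa;	/* WaEnableForceRestoreInCtxtDescForVCS applies */
};

static void execlists_context_unqueue(struct fake_ring *ring)
{
	printf("%s: submitting next context(s)\n", ring->name);
}

static void maybe_unqueue(struct fake_ring *ring, uint32_t last_status,
			  int submit_contexts)
{
	if (ring->lite_restore_wa) {
		/*
		 * Lite-restore is disabled, so a context resubmitted while
		 * still active would fully preempt itself. Only submit once
		 * the hardware reported an "active to idle" event.
		 */
		if ((last_status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
		    submit_contexts != 0)
			execlists_context_unqueue(ring);
	} else if (submit_contexts != 0) {
		execlists_context_unqueue(ring);
	}
}

int main(void)
{
	struct fake_ring vcs = { "vcs", true };

	/* Last event was active->idle: safe to submit. */
	maybe_unqueue(&vcs, GEN8_CTX_STATUS_ACTIVE_IDLE, 1);

	/* No active->idle event seen this interrupt: hold back. */
	maybe_unqueue(&vcs, 0, 1);

	return 0;
}

The second call in main() is the interesting one: with the workaround active and no "active to idle" event seen, a ready submission is held back, which is exactly the self-preemption guard the commit message describes.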
When WaEnableForceRestoreInCtxtDescForVCS is required, it is only
safe to send new contexts if the last reported event is "active to
idle". Otherwise the same context can fully preempt itself because
lite-restore is disabled.

Testcase: igt/gem_concurrent_blit
Reported-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Michel Thierry <michel.thierry@intel.com>
---
 drivers/gpu/drm/i915/intel_lrc.c | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)
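One detail the diff is easy to read past: "status" gains an "= 0" initializer because it is only assigned inside the loop that drains context-status events, yet the new gate reads it after the loop. A minimal sketch of that pattern, with a hypothetical read_next_event() standing in for the hardware read:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)	/* stand-in for the real bit */

/*
 * Hypothetical stand-in for reading one context-status event; returns
 * false once the buffer is drained. Here it simulates an interrupt
 * that finds no pending events.
 */
static bool read_next_event(uint32_t *event)
{
	(void)event;
	return false;
}

int main(void)
{
	uint32_t status = 0;	/* stays defined even if no event is read */
	uint32_t event;
	int submit_contexts = 0;

	while (read_next_event(&event)) {
		status = event;		/* remember the last reported event */
		submit_contexts++;
	}

	/*
	 * Without the "= 0" initializer above, a pass that read no events
	 * would test an indeterminate value of 'status' here.
	 */
	if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) && submit_contexts != 0)
		printf("safe to submit new contexts\n");
	else
		printf("hold back submission\n");

	return 0;
}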