Message ID | 20171206224741.8600-4-dhinakaran.pandiyan@intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On Wed, Dec 06, 2017 at 10:47:40PM +0000, Dhinakaran Pandiyan wrote: > When DC states are enabled and PSR is active, the hardware enters DC5/DC6 > states resulting in frame counter resets. The frame counter resets mess > up the vblank counting logic. So in order to disable DC states when > vblank interrupts are required and to disallow DC states when vblanks > interrupts are already enabled, introduce a new power domain. Since this > power domain reference needs to be acquired and released in atomic context, > the corresponding _get() and _put() methods skip the power_domain mutex. \o/ > > Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com> > --- > drivers/gpu/drm/i915/i915_drv.h | 5 ++ > drivers/gpu/drm/i915/intel_drv.h | 3 +- > drivers/gpu/drm/i915/intel_runtime_pm.c | 125 +++++++++++++++++++++++++++++++- > 3 files changed, 128 insertions(+), 5 deletions(-) > > diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h > index 18d42885205b..ba9107ec1ed1 100644 > --- a/drivers/gpu/drm/i915/i915_drv.h > +++ b/drivers/gpu/drm/i915/i915_drv.h > @@ -397,6 +397,7 @@ enum intel_display_power_domain { > POWER_DOMAIN_AUX_C, > POWER_DOMAIN_AUX_D, > POWER_DOMAIN_GMBUS, > + POWER_DOMAIN_VBLANK, > POWER_DOMAIN_MODESET, > POWER_DOMAIN_INIT, > > @@ -1475,6 +1476,10 @@ struct i915_power_well { > bool has_vga:1; > bool has_fuses:1; > } hsw; > + struct { > + spinlock_t lock; > + bool was_disabled; > + } dc_off; what about a more generic name here? something like + struct { + spinlock_t lock; + bool was_disabled; + } atomic; + is_atomic; //or needs_atomic so... 
> }; > const struct i915_power_well_ops *ops; > }; > diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h > index 30f791f89d64..93ca503f18bb 100644 > --- a/drivers/gpu/drm/i915/intel_drv.h > +++ b/drivers/gpu/drm/i915/intel_drv.h > @@ -1865,7 +1865,8 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder, > bool override, unsigned int mask); > bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, > enum dpio_channel ch, bool override); > - > +bool intel_display_power_vblank_get(struct drm_i915_private *dev_priv); > +void intel_display_power_vblank_put(struct drm_i915_private *dev_priv); > > /* intel_pm.c */ > void intel_init_clock_gating(struct drm_i915_private *dev_priv); > diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c > index f88f2c070c5f..f1807bd74242 100644 > --- a/drivers/gpu/drm/i915/intel_runtime_pm.c > +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c > @@ -126,6 +126,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) > return "AUX_D"; > case POWER_DOMAIN_GMBUS: > return "GMBUS"; > + case POWER_DOMAIN_VBLANK: > + return "VBLANK"; > case POWER_DOMAIN_INIT: > return "INIT"; > case POWER_DOMAIN_MODESET: > @@ -196,10 +198,17 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, > if (power_well->always_on) > continue; > > - if (!power_well->hw_enabled) { > + if (power_well->id == SKL_DISP_PW_DC_OFF) > + spin_lock(&power_well->dc_off.lock); ... 
instead of a specif pw check here you would have + if (power_well->is_atomic) + spin_lock(&power_well->atomic.lock) > + > + if (!power_well->hw_enabled) > is_enabled = false; > + > + if (power_well->id == SKL_DISP_PW_DC_OFF) > + spin_unlock(&power_well->dc_off.lock); > + > + if (!is_enabled) > break; > - } > } > > return is_enabled; > @@ -724,6 +733,7 @@ static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, > skl_enable_dc6(dev_priv); > else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5) > gen9_enable_dc5(dev_priv); > + power_well->dc_off.was_disabled = true; > } > > static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv, > @@ -1441,6 +1451,77 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, > chv_set_pipe_power_well(dev_priv, power_well, false); > } > > +/** > + * intel_display_power_vblank_get - acquire a VBLANK power domain reference atomically > + * @dev_priv: i915 device instance > + * > + * This function gets a POWER_DOMAIN_VBLANK reference without blocking and > + * returns true if the DC_OFF power well was disabled since this function was > + * called the last time. > + */ > +bool intel_display_power_vblank_get(struct drm_i915_private *dev_priv) > +{ > + struct i915_power_domains *power_domains = &dev_priv->power_domains; > + struct i915_power_well *power_well; > + bool needs_restore = false; > + > + if (!HAS_CSR(dev_priv) || !dev_priv->psr.source_ok) > + return false; > + > + /* The corresponding CRTC should be active by the time driver turns on > + * vblank interrupts, which in turn means the enabled pipe power domain > + * would have acquired the device runtime pm reference. 
> + */ > + intel_runtime_pm_get_if_in_use(dev_priv); > + > + for_each_power_domain_well(dev_priv, power_well, BIT_ULL(POWER_DOMAIN_VBLANK)) { > + if (power_well->id == SKL_DISP_PW_DC_OFF) > + spin_lock(&power_well->dc_off.lock); > + > + intel_power_well_get(dev_priv, power_well); > + > + if (power_well->id == SKL_DISP_PW_DC_OFF) { > + needs_restore = power_well->dc_off.was_disabled; > + power_well->dc_off.was_disabled = false; > + spin_unlock(&power_well->dc_off.lock); I don't understand why you need the was_disabled here and not the counter like the regular pw? But also overall I can't see how this is avoiding the mutex_lock(&power_domains->lock); in general. Is there a way to make it more generic in a way that we could define atomic pw and regular pw and we could see which one would be taking the atomic path easily? Thanks, Rodrigo. > + } > + } > + atomic_inc(&power_domains->domain_use_count[POWER_DOMAIN_VBLANK]); > + > + return needs_restore; > +} > + > +/** > + * intel_display_power_vblank_put - drop a VBLANK power domain reference atomically > + * > + * A non-blocking version intel_display_power_put() specifically to control the > + * DC_OFF power well in atomic contexts. 
> + */ > +void intel_display_power_vblank_put(struct drm_i915_private *dev_priv) > +{ > + struct i915_power_domains *power_domains = &dev_priv->power_domains; > + struct i915_power_well *power_well; > + > + if (!HAS_CSR(dev_priv) || !dev_priv->psr.source_ok) > + return; > + > + WARN(atomic_dec_return(&power_domains->domain_use_count[POWER_DOMAIN_VBLANK]) < 0, > + "Use count on domain %s was already zero\n", > + intel_display_power_domain_str(POWER_DOMAIN_VBLANK)); > + > + for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(POWER_DOMAIN_VBLANK)) { > + if (power_well->id == SKL_DISP_PW_DC_OFF) > + spin_lock(&power_well->dc_off.lock); > + > + intel_power_well_put(dev_priv, power_well); > + > + if (power_well->id == SKL_DISP_PW_DC_OFF) > + spin_unlock(&power_well->dc_off.lock); > + } > + > + intel_runtime_pm_put(dev_priv); > +} > + > static void > __intel_display_power_get_domain(struct drm_i915_private *dev_priv, > enum intel_display_power_domain domain) > @@ -1448,9 +1529,16 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv, > struct i915_power_domains *power_domains = &dev_priv->power_domains; > struct i915_power_well *power_well; > > - for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain)) > + for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain)) { > + if (power_well->id == SKL_DISP_PW_DC_OFF) > + spin_lock(&power_well->dc_off.lock); > + > intel_power_well_get(dev_priv, power_well); > > + if (power_well->id == SKL_DISP_PW_DC_OFF) > + spin_unlock(&power_well->dc_off.lock); > + } > + > atomic_inc(&power_domains->domain_use_count[domain]); > } > > @@ -1541,9 +1629,16 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, > "Use count on domain %s was already zero\n", > intel_display_power_domain_str(domain)); > > - for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) > + for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) { > + if (power_well->id == SKL_DISP_PW_DC_OFF) > 
+ spin_lock(&power_well->dc_off.lock); > + > intel_power_well_put(dev_priv, power_well); > > + if (power_well->id == SKL_DISP_PW_DC_OFF) > + spin_unlock(&power_well->dc_off.lock); > + } > + > mutex_unlock(&power_domains->lock); > > intel_runtime_pm_put(dev_priv); > @@ -1706,6 +1801,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, > SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ > BIT_ULL(POWER_DOMAIN_MODESET) | \ > BIT_ULL(POWER_DOMAIN_AUX_A) | \ > + BIT_ULL(POWER_DOMAIN_VBLANK) | \ > BIT_ULL(POWER_DOMAIN_INIT)) > > #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ > @@ -2342,6 +2438,9 @@ static struct i915_power_well cnl_power_wells[] = { > .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS, > .ops = &gen9_dc_off_power_well_ops, > .id = SKL_DISP_PW_DC_OFF, > + { > + .dc_off.was_disabled = false, > + }, > }, > { > .name = "power well 2", > @@ -2470,6 +2569,7 @@ static void assert_power_well_ids_unique(struct drm_i915_private *dev_priv) > int intel_power_domains_init(struct drm_i915_private *dev_priv) > { > struct i915_power_domains *power_domains = &dev_priv->power_domains; > + struct i915_power_well *dc_off_pw; > > i915_modparams.disable_power_well = > sanitize_disable_power_well_option(dev_priv, > @@ -2507,6 +2607,10 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) > set_power_wells(power_domains, i9xx_always_on_power_well); > } > > + dc_off_pw = lookup_power_well(dev_priv, SKL_DISP_PW_DC_OFF); > + if (dc_off_pw) > + spin_lock_init(&dc_off_pw->dc_off.lock); > + > assert_power_well_ids_unique(dev_priv); > > return 0; > @@ -2555,8 +2659,14 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) > mutex_lock(&power_domains->lock); > for_each_power_well(dev_priv, power_well) { > power_well->ops->sync_hw(dev_priv, power_well); > + > + if (power_well->id == SKL_DISP_PW_DC_OFF) > + spin_lock(&power_well->dc_off.lock); > + > power_well->hw_enabled = power_well->ops->is_enabled(dev_priv, > power_well); > + if (power_well->id 
== SKL_DISP_PW_DC_OFF) > + spin_unlock(&power_well->dc_off.lock); > } > mutex_unlock(&power_domains->lock); > } > @@ -3079,6 +3189,13 @@ void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) > if (!power_well->domains) > continue; > > + /* > + * Reading SKL_DISP_PW_DC_OFF power_well->count without its > + * private spinlock should be safe here as power_well->count > + * gets modified only in power_well_get() and power_well_put() > + * and they are not called until drm_crtc_vblank_on(). > + */ > + > enabled = power_well->ops->is_enabled(dev_priv, power_well); > if ((power_well->count || power_well->always_on) != enabled) > DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)", > -- > 2.11.0 >
On Wed, 2017-12-06 at 15:54 -0800, Rodrigo Vivi wrote: > On Wed, Dec 06, 2017 at 10:47:40PM +0000, Dhinakaran Pandiyan wrote: > > When DC states are enabled and PSR is active, the hardware enters DC5/DC6 > > states resulting in frame counter resets. The frame counter resets mess > > up the vblank counting logic. So in order to disable DC states when > > vblank interrupts are required and to disallow DC states when vblanks > > interrupts are already enabled, introduce a new power domain. Since this > > power domain reference needs to be acquired and released in atomic context, > > the corresponding _get() and _put() methods skip the power_domain mutex. > > \o/ > > > > > Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com> > > --- > > drivers/gpu/drm/i915/i915_drv.h | 5 ++ > > drivers/gpu/drm/i915/intel_drv.h | 3 +- > > drivers/gpu/drm/i915/intel_runtime_pm.c | 125 +++++++++++++++++++++++++++++++- > > 3 files changed, 128 insertions(+), 5 deletions(-) > > > > diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h > > index 18d42885205b..ba9107ec1ed1 100644 > > --- a/drivers/gpu/drm/i915/i915_drv.h > > +++ b/drivers/gpu/drm/i915/i915_drv.h > > @@ -397,6 +397,7 @@ enum intel_display_power_domain { > > POWER_DOMAIN_AUX_C, > > POWER_DOMAIN_AUX_D, > > POWER_DOMAIN_GMBUS, > > + POWER_DOMAIN_VBLANK, > > POWER_DOMAIN_MODESET, > > POWER_DOMAIN_INIT, > > > > @@ -1475,6 +1476,10 @@ struct i915_power_well { > > bool has_vga:1; > > bool has_fuses:1; > > } hsw; > > + struct { > > + spinlock_t lock; > > + bool was_disabled; > > + } dc_off; > > what about a more generic name here? > something like > > + struct { > + spinlock_t lock; > + bool was_disabled; > + } atomic; > + is_atomic; //or needs_atomic > > so... 
> > > }; > > const struct i915_power_well_ops *ops; > > }; > > diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h > > index 30f791f89d64..93ca503f18bb 100644 > > --- a/drivers/gpu/drm/i915/intel_drv.h > > +++ b/drivers/gpu/drm/i915/intel_drv.h > > @@ -1865,7 +1865,8 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder, > > bool override, unsigned int mask); > > bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, > > enum dpio_channel ch, bool override); > > - > > +bool intel_display_power_vblank_get(struct drm_i915_private *dev_priv); > > +void intel_display_power_vblank_put(struct drm_i915_private *dev_priv); > > > > /* intel_pm.c */ > > void intel_init_clock_gating(struct drm_i915_private *dev_priv); > > diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c > > index f88f2c070c5f..f1807bd74242 100644 > > --- a/drivers/gpu/drm/i915/intel_runtime_pm.c > > +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c > > @@ -126,6 +126,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) > > return "AUX_D"; > > case POWER_DOMAIN_GMBUS: > > return "GMBUS"; > > + case POWER_DOMAIN_VBLANK: > > + return "VBLANK"; > > case POWER_DOMAIN_INIT: > > return "INIT"; > > case POWER_DOMAIN_MODESET: > > @@ -196,10 +198,17 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, > > if (power_well->always_on) > > continue; > > > > - if (!power_well->hw_enabled) { > > + if (power_well->id == SKL_DISP_PW_DC_OFF) > > + spin_lock(&power_well->dc_off.lock); > > ... 
instead of a specif pw check here you would have > > + if (power_well->is_atomic) > + spin_lock(&power_well->atomic.lock) > > > + > > + if (!power_well->hw_enabled) > > is_enabled = false; > > + > > + if (power_well->id == SKL_DISP_PW_DC_OFF) > > + spin_unlock(&power_well->dc_off.lock); > > + > > + if (!is_enabled) > > break; > > - } > > } > > > > return is_enabled; > > @@ -724,6 +733,7 @@ static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, > > skl_enable_dc6(dev_priv); > > else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5) > > gen9_enable_dc5(dev_priv); > > + power_well->dc_off.was_disabled = true; > > } > > > > static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv, > > @@ -1441,6 +1451,77 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, > > chv_set_pipe_power_well(dev_priv, power_well, false); > > } > > > > +/** > > + * intel_display_power_vblank_get - acquire a VBLANK power domain reference atomically > > + * @dev_priv: i915 device instance > > + * > > + * This function gets a POWER_DOMAIN_VBLANK reference without blocking and > > + * returns true if the DC_OFF power well was disabled since this function was > > + * called the last time. > > + */ > > +bool intel_display_power_vblank_get(struct drm_i915_private *dev_priv) > > +{ > > + struct i915_power_domains *power_domains = &dev_priv->power_domains; > > + struct i915_power_well *power_well; > > + bool needs_restore = false; > > + > > + if (!HAS_CSR(dev_priv) || !dev_priv->psr.source_ok) > > + return false; > > + > > + /* The corresponding CRTC should be active by the time driver turns on > > + * vblank interrupts, which in turn means the enabled pipe power domain > > + * would have acquired the device runtime pm reference. 
> > + */ > > + intel_runtime_pm_get_if_in_use(dev_priv); > > + > > + for_each_power_domain_well(dev_priv, power_well, BIT_ULL(POWER_DOMAIN_VBLANK)) { > > + if (power_well->id == SKL_DISP_PW_DC_OFF) > > + spin_lock(&power_well->dc_off.lock); > > + > > + intel_power_well_get(dev_priv, power_well); > > + > > + if (power_well->id == SKL_DISP_PW_DC_OFF) { > > + needs_restore = power_well->dc_off.was_disabled; > > + power_well->dc_off.was_disabled = false; > > + spin_unlock(&power_well->dc_off.lock); > > I don't understand why you need the was_disabled here and not the counter like > the regular pw? > The return from this function is used to determine whether we need to calculate the missed vblanks. Let's say we returned true only when the power well was enabled (based on refcount), we'll end up not fixing up the vblanks in this scenario - _vblank_put() -> DC off disabled(ref count=0) <frame counter reset> _power_get(AUX_A) -> DC off enabled (ref count=1) _vblank_get() -> (ref count=2) > But also overall I can't see how this is avoiding the mutex_lock(&power_domains->lock); > in general. Is there a way to make it more generic in a way that we could define atomic pw and regular pw > and we could see which one would be taking the atomic path easily? > I did try to create a generic interface with _power_nb_get(domain) and _power_nb_put(domain), but the above problem meant that I had to write code specific to the VBLANK power domain inside the generic _power_nb_get(domain) method. > Thanks, > Rodrigo. > > > + } > > + } > > + atomic_inc(&power_domains->domain_use_count[POWER_DOMAIN_VBLANK]); > > + > > + return needs_restore; > > +} > > + > > +/** > > + * intel_display_power_vblank_put - drop a VBLANK power domain reference atomically > > + * > > + * A non-blocking version intel_display_power_put() specifically to control the > > + * DC_OFF power well in atomic contexts. 
> > + */ > > +void intel_display_power_vblank_put(struct drm_i915_private *dev_priv) > > +{ > > + struct i915_power_domains *power_domains = &dev_priv->power_domains; > > + struct i915_power_well *power_well; > > + > > + if (!HAS_CSR(dev_priv) || !dev_priv->psr.source_ok) > > + return; > > + > > + WARN(atomic_dec_return(&power_domains->domain_use_count[POWER_DOMAIN_VBLANK]) < 0, > > + "Use count on domain %s was already zero\n", > > + intel_display_power_domain_str(POWER_DOMAIN_VBLANK)); > > + > > + for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(POWER_DOMAIN_VBLANK)) { > > + if (power_well->id == SKL_DISP_PW_DC_OFF) > > + spin_lock(&power_well->dc_off.lock); > > + > > + intel_power_well_put(dev_priv, power_well); > > + > > + if (power_well->id == SKL_DISP_PW_DC_OFF) > > + spin_unlock(&power_well->dc_off.lock); > > + } > > + > > + intel_runtime_pm_put(dev_priv); > > +} > > + > > static void > > __intel_display_power_get_domain(struct drm_i915_private *dev_priv, > > enum intel_display_power_domain domain) > > @@ -1448,9 +1529,16 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv, > > struct i915_power_domains *power_domains = &dev_priv->power_domains; > > struct i915_power_well *power_well; > > > > - for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain)) > > + for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain)) { > > + if (power_well->id == SKL_DISP_PW_DC_OFF) > > + spin_lock(&power_well->dc_off.lock); > > + > > intel_power_well_get(dev_priv, power_well); > > > > + if (power_well->id == SKL_DISP_PW_DC_OFF) > > + spin_unlock(&power_well->dc_off.lock); > > + } > > + > > atomic_inc(&power_domains->domain_use_count[domain]); > > } > > > > @@ -1541,9 +1629,16 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, > > "Use count on domain %s was already zero\n", > > intel_display_power_domain_str(domain)); > > > > - for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) > > + 
for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) { > > + if (power_well->id == SKL_DISP_PW_DC_OFF) > > + spin_lock(&power_well->dc_off.lock); > > + > > intel_power_well_put(dev_priv, power_well); > > > > + if (power_well->id == SKL_DISP_PW_DC_OFF) > > + spin_unlock(&power_well->dc_off.lock); > > + } > > + > > mutex_unlock(&power_domains->lock); > > > > intel_runtime_pm_put(dev_priv); > > @@ -1706,6 +1801,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, > > SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ > > BIT_ULL(POWER_DOMAIN_MODESET) | \ > > BIT_ULL(POWER_DOMAIN_AUX_A) | \ > > + BIT_ULL(POWER_DOMAIN_VBLANK) | \ > > BIT_ULL(POWER_DOMAIN_INIT)) > > > > #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ > > @@ -2342,6 +2438,9 @@ static struct i915_power_well cnl_power_wells[] = { > > .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS, > > .ops = &gen9_dc_off_power_well_ops, > > .id = SKL_DISP_PW_DC_OFF, > > + { > > + .dc_off.was_disabled = false, > > + }, > > }, > > { > > .name = "power well 2", > > @@ -2470,6 +2569,7 @@ static void assert_power_well_ids_unique(struct drm_i915_private *dev_priv) > > int intel_power_domains_init(struct drm_i915_private *dev_priv) > > { > > struct i915_power_domains *power_domains = &dev_priv->power_domains; > > + struct i915_power_well *dc_off_pw; > > > > i915_modparams.disable_power_well = > > sanitize_disable_power_well_option(dev_priv, > > @@ -2507,6 +2607,10 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) > > set_power_wells(power_domains, i9xx_always_on_power_well); > > } > > > > + dc_off_pw = lookup_power_well(dev_priv, SKL_DISP_PW_DC_OFF); > > + if (dc_off_pw) > > + spin_lock_init(&dc_off_pw->dc_off.lock); > > + > > assert_power_well_ids_unique(dev_priv); > > > > return 0; > > @@ -2555,8 +2659,14 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) > > mutex_lock(&power_domains->lock); > > for_each_power_well(dev_priv, power_well) { > > 
power_well->ops->sync_hw(dev_priv, power_well); > > + > > + if (power_well->id == SKL_DISP_PW_DC_OFF) > > + spin_lock(&power_well->dc_off.lock); > > + > > power_well->hw_enabled = power_well->ops->is_enabled(dev_priv, > > power_well); > > + if (power_well->id == SKL_DISP_PW_DC_OFF) > > + spin_unlock(&power_well->dc_off.lock); > > } > > mutex_unlock(&power_domains->lock); > > } > > @@ -3079,6 +3189,13 @@ void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) > > if (!power_well->domains) > > continue; > > > > + /* > > + * Reading SKL_DISP_PW_DC_OFF power_well->count without its > > + * private spinlock should be safe here as power_well->count > > + * gets modified only in power_well_get() and power_well_put() > > + * and they are not called until drm_crtc_vblank_on(). > > + */ > > + > > enabled = power_well->ops->is_enabled(dev_priv, power_well); > > if ((power_well->count || power_well->always_on) != enabled) > > DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)", > > -- > > 2.11.0 > > > _______________________________________________ > Intel-gfx mailing list > Intel-gfx@lists.freedesktop.org > https://lists.freedesktop.org/mailman/listinfo/intel-gfx
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 18d42885205b..ba9107ec1ed1 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -397,6 +397,7 @@ enum intel_display_power_domain { POWER_DOMAIN_AUX_C, POWER_DOMAIN_AUX_D, POWER_DOMAIN_GMBUS, + POWER_DOMAIN_VBLANK, POWER_DOMAIN_MODESET, POWER_DOMAIN_INIT, @@ -1475,6 +1476,10 @@ struct i915_power_well { bool has_vga:1; bool has_fuses:1; } hsw; + struct { + spinlock_t lock; + bool was_disabled; + } dc_off; }; const struct i915_power_well_ops *ops; }; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 30f791f89d64..93ca503f18bb 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1865,7 +1865,8 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder, bool override, unsigned int mask); bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, enum dpio_channel ch, bool override); - +bool intel_display_power_vblank_get(struct drm_i915_private *dev_priv); +void intel_display_power_vblank_put(struct drm_i915_private *dev_priv); /* intel_pm.c */ void intel_init_clock_gating(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index f88f2c070c5f..f1807bd74242 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -126,6 +126,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "AUX_D"; case POWER_DOMAIN_GMBUS: return "GMBUS"; + case POWER_DOMAIN_VBLANK: + return "VBLANK"; case POWER_DOMAIN_INIT: return "INIT"; case POWER_DOMAIN_MODESET: @@ -196,10 +198,17 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, if (power_well->always_on) continue; - if (!power_well->hw_enabled) { + if (power_well->id == SKL_DISP_PW_DC_OFF) + spin_lock(&power_well->dc_off.lock); + + if 
(!power_well->hw_enabled) is_enabled = false; + + if (power_well->id == SKL_DISP_PW_DC_OFF) + spin_unlock(&power_well->dc_off.lock); + + if (!is_enabled) break; - } } return is_enabled; @@ -724,6 +733,7 @@ static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, skl_enable_dc6(dev_priv); else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5) gen9_enable_dc5(dev_priv); + power_well->dc_off.was_disabled = true; } static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv, @@ -1441,6 +1451,77 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, chv_set_pipe_power_well(dev_priv, power_well, false); } +/** + * intel_display_power_vblank_get - acquire a VBLANK power domain reference atomically + * @dev_priv: i915 device instance + * + * This function gets a POWER_DOMAIN_VBLANK reference without blocking and + * returns true if the DC_OFF power well was disabled since this function was + * called the last time. + */ +bool intel_display_power_vblank_get(struct drm_i915_private *dev_priv) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *power_well; + bool needs_restore = false; + + if (!HAS_CSR(dev_priv) || !dev_priv->psr.source_ok) + return false; + + /* The corresponding CRTC should be active by the time driver turns on + * vblank interrupts, which in turn means the enabled pipe power domain + * would have acquired the device runtime pm reference. 
+ */ + intel_runtime_pm_get_if_in_use(dev_priv); + + for_each_power_domain_well(dev_priv, power_well, BIT_ULL(POWER_DOMAIN_VBLANK)) { + if (power_well->id == SKL_DISP_PW_DC_OFF) + spin_lock(&power_well->dc_off.lock); + + intel_power_well_get(dev_priv, power_well); + + if (power_well->id == SKL_DISP_PW_DC_OFF) { + needs_restore = power_well->dc_off.was_disabled; + power_well->dc_off.was_disabled = false; + spin_unlock(&power_well->dc_off.lock); + } + } + atomic_inc(&power_domains->domain_use_count[POWER_DOMAIN_VBLANK]); + + return needs_restore; +} + +/** + * intel_display_power_vblank_put - drop a VBLANK power domain reference atomically + * + * A non-blocking version intel_display_power_put() specifically to control the + * DC_OFF power well in atomic contexts. + */ +void intel_display_power_vblank_put(struct drm_i915_private *dev_priv) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *power_well; + + if (!HAS_CSR(dev_priv) || !dev_priv->psr.source_ok) + return; + + WARN(atomic_dec_return(&power_domains->domain_use_count[POWER_DOMAIN_VBLANK]) < 0, + "Use count on domain %s was already zero\n", + intel_display_power_domain_str(POWER_DOMAIN_VBLANK)); + + for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(POWER_DOMAIN_VBLANK)) { + if (power_well->id == SKL_DISP_PW_DC_OFF) + spin_lock(&power_well->dc_off.lock); + + intel_power_well_put(dev_priv, power_well); + + if (power_well->id == SKL_DISP_PW_DC_OFF) + spin_unlock(&power_well->dc_off.lock); + } + + intel_runtime_pm_put(dev_priv); +} + static void __intel_display_power_get_domain(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) @@ -1448,9 +1529,16 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv, struct i915_power_domains *power_domains = &dev_priv->power_domains; struct i915_power_well *power_well; - for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain)) + for_each_power_domain_well(dev_priv, 
power_well, BIT_ULL(domain)) { + if (power_well->id == SKL_DISP_PW_DC_OFF) + spin_lock(&power_well->dc_off.lock); + intel_power_well_get(dev_priv, power_well); + if (power_well->id == SKL_DISP_PW_DC_OFF) + spin_unlock(&power_well->dc_off.lock); + } + atomic_inc(&power_domains->domain_use_count[domain]); } @@ -1541,9 +1629,16 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, "Use count on domain %s was already zero\n", intel_display_power_domain_str(domain)); - for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) + for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) { + if (power_well->id == SKL_DISP_PW_DC_OFF) + spin_lock(&power_well->dc_off.lock); + intel_power_well_put(dev_priv, power_well); + if (power_well->id == SKL_DISP_PW_DC_OFF) + spin_unlock(&power_well->dc_off.lock); + } + mutex_unlock(&power_domains->lock); intel_runtime_pm_put(dev_priv); @@ -1706,6 +1801,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ BIT_ULL(POWER_DOMAIN_MODESET) | \ BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_VBLANK) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ @@ -2342,6 +2438,9 @@ static struct i915_power_well cnl_power_wells[] = { .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = &gen9_dc_off_power_well_ops, .id = SKL_DISP_PW_DC_OFF, + { + .dc_off.was_disabled = false, + }, }, { .name = "power well 2", @@ -2470,6 +2569,7 @@ static void assert_power_well_ids_unique(struct drm_i915_private *dev_priv) int intel_power_domains_init(struct drm_i915_private *dev_priv) { struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *dc_off_pw; i915_modparams.disable_power_well = sanitize_disable_power_well_option(dev_priv, @@ -2507,6 +2607,10 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) set_power_wells(power_domains, i9xx_always_on_power_well); } + dc_off_pw = 
lookup_power_well(dev_priv, SKL_DISP_PW_DC_OFF); + if (dc_off_pw) + spin_lock_init(&dc_off_pw->dc_off.lock); + assert_power_well_ids_unique(dev_priv); return 0; @@ -2555,8 +2659,14 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) mutex_lock(&power_domains->lock); for_each_power_well(dev_priv, power_well) { power_well->ops->sync_hw(dev_priv, power_well); + + if (power_well->id == SKL_DISP_PW_DC_OFF) + spin_lock(&power_well->dc_off.lock); + power_well->hw_enabled = power_well->ops->is_enabled(dev_priv, power_well); + if (power_well->id == SKL_DISP_PW_DC_OFF) + spin_unlock(&power_well->dc_off.lock); } mutex_unlock(&power_domains->lock); } @@ -3079,6 +3189,13 @@ void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) if (!power_well->domains) continue; + /* + * Reading SKL_DISP_PW_DC_OFF power_well->count without its + * private spinlock should be safe here as power_well->count + * gets modified only in power_well_get() and power_well_put() + * and they are not called until drm_crtc_vblank_on(). + */ + enabled = power_well->ops->is_enabled(dev_priv, power_well); if ((power_well->count || power_well->always_on) != enabled) DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
When DC states are enabled and PSR is active, the hardware enters DC5/DC6 states resulting in frame counter resets. The frame counter resets mess up the vblank counting logic. So in order to disable DC states when vblank interrupts are required and to disallow DC states when vblank interrupts are already enabled, introduce a new power domain. Since this power domain reference needs to be acquired and released in atomic context, the corresponding _get() and _put() methods skip the power_domain mutex. Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com> --- drivers/gpu/drm/i915/i915_drv.h | 5 ++ drivers/gpu/drm/i915/intel_drv.h | 3 +- drivers/gpu/drm/i915/intel_runtime_pm.c | 125 +++++++++++++++++++++++++++++++- 3 files changed, 128 insertions(+), 5 deletions(-)