| Field | Value |
|---|---|
| Message ID | 1483356663-32668-4-git-send-email-vathsala.nagaraju@intel.com (mailing list archive) |
| State | New, archived |
On Mon, Jan 02, 2017 at 05:00:56PM +0530, vathsala nagaraju wrote:
> Psr1 and psr2 are mutually exclusive, i.e. when psr2 is enabled,
> psr1 should be disabled. When psr2 is exited, bit 31 of reg
> PSR2_CTL must be set to 0, but currently bit 31 of SRD_CTL
> (psr1 control register) is set to 0.
> Also, PSR2_IDLE state is looked up from SRD_STATUS (psr1 register)
> instead of the PSR2_STATUS register, which has wrong data, resulting
> in a blank screen.
> hsw_enable_source is split into hsw_enable_source_psr1 and
> hsw_enable_source_psr2 for easier code review and maintenance,
> as suggested by Rodrigo and Jim.
>
> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
> Cc: Jim Bride <jim.bride@linux.intel.com>
> Signed-off-by: Vathsala Nagaraju <vathsala.nagaraju@intel.com>
> Signed-off-by: Patil Deepti <deepti.patil@intel.com>
> ---
>  drivers/gpu/drm/i915/i915_reg.h  |   3 +
>  drivers/gpu/drm/i915/intel_psr.c | 124 +++++++++++++++++++++++++++++----------
>  2 files changed, 97 insertions(+), 30 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
> index 00970aa..7830e6e 100644
> --- a/drivers/gpu/drm/i915/i915_reg.h
> +++ b/drivers/gpu/drm/i915/i915_reg.h
> @@ -3615,6 +3615,9 @@ enum {
>  #define EDP_PSR2_FRAME_BEFORE_SU_MASK  (0xf<<4)
>  #define EDP_PSR2_IDLE_MASK             0xf
>
> +#define EDP_PSR2_STATUS_CTL            _MMIO(0x6f940)
> +#define EDP_PSR2_STATUS_STATE_MASK     (0xf<<28)
> +
>  /* VGA port control */
>  #define ADPA           _MMIO(0x61100)
>  #define PCH_ADPA       _MMIO(0xe1100)
> diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
> index c3aa649..ff2ecfd 100644
> --- a/drivers/gpu/drm/i915/intel_psr.c
> +++ b/drivers/gpu/drm/i915/intel_psr.c
> @@ -261,12 +261,11 @@ static void vlv_psr_activate(struct intel_dp *intel_dp)
>                        VLV_EDP_PSR_ACTIVE_ENTRY);
>  }
>
> -static void hsw_psr_enable_source(struct intel_dp *intel_dp)
> +static void hsw_enable_source_psr1(struct intel_dp *intel_dp)
>  {
>         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
>         struct drm_device *dev = dig_port->base.base.dev;
>         struct drm_i915_private *dev_priv = to_i915(dev);
> -
>         uint32_t max_sleep_time = 0x1f;
>         /*
>          * Let's respect VBT in case VBT asks a higher idle_frame value.
> @@ -312,14 +311,30 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
>                 val |= EDP_PSR_TP1_TP2_SEL;
>
>         I915_WRITE(EDP_PSR_CTL, val);
> +}
>
> -       if (!dev_priv->psr.psr2_support)
> -               return;
> +static void hsw_enable_source_psr2(struct intel_dp *intel_dp)

hm... I believe skl_enable_source_psr2 would be more appropriate, because
psr2 was introduced on skl and not on hsw, and the hsw_ prefix might lead
people to think otherwise... although when we call this we check for psr2
presence and not the platform itself.
Maybe just keep hsw_ on the main hsw_psr_enable_source, and for these 2 new
functions just use intel_enable_source_psr1 / intel_enable_source_psr2.

> +{
> +       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
> +       struct drm_device *dev = dig_port->base.base.dev;
> +       struct drm_i915_private *dev_priv = to_i915(dev);
> +
> +       /*
> +        * Let's respect VBT in case VBT asks a higher idle_frame value.
> +        * Let's use 6 as the minimum to cover all known cases including
> +        * the off-by-one issue that HW has in some cases. Also there are
> +        * cases where sink should be able to train
> +        * with the 5 or 6 idle patterns.
> +        */
> +       uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
> +       uint32_t val = EDP_PSR_ENABLE;
> +
> +       val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
>
>         /* FIXME: selective update is probably totally broken because it doesn't
>          * mesh at all with our frontbuffer tracking. And the hw alone isn't
>          * good enough. */
> -       val = EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
> +       val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
>
>         if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
>                 val |= EDP_PSR2_TP2_TIME_2500;
> @@ -333,6 +348,20 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
>         I915_WRITE(EDP_PSR2_CTL, val);
>  }
>
> +
> +static void hsw_psr_enable_source(struct intel_dp *intel_dp)
> +{
> +       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
> +       struct drm_device *dev = dig_port->base.base.dev;
> +       struct drm_i915_private *dev_priv = to_i915(dev);
> +
> +       /* psr1 and psr2 are mutually exclusive.*/
> +       if (dev_priv->psr.psr2_support)
> +               hsw_enable_source_psr2(intel_dp);
> +       else
> +               hsw_enable_source_psr1(intel_dp);
> +}
> +
>  static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
>  {
>         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
> @@ -410,7 +439,10 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
>         struct drm_device *dev = intel_dig_port->base.base.dev;
>         struct drm_i915_private *dev_priv = to_i915(dev);
>
> -       WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
> +       if (dev_priv->psr.psr2_support)
> +               WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
> +       else
> +               WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
>         WARN_ON(dev_priv->psr.active);
>         lockdep_assert_held(&dev_priv->psr.lock);
>
> @@ -462,8 +494,6 @@ void intel_psr_enable(struct intel_dp *intel_dp)
>         dev_priv->psr.busy_frontbuffer_bits = 0;
>
>         if (HAS_DDI(dev_priv)) {
> -               hsw_psr_setup_vsc(intel_dp);
> -
>                 if (dev_priv->psr.psr2_support) {
>                         /* PSR2 is restricted to work with panel resolutions upto 3200x2000 */
>                         if (crtc->config->pipe_src_w > 3200 ||
> @@ -471,8 +501,10 @@ void intel_psr_enable(struct intel_dp *intel_dp)
>                                 dev_priv->psr.psr2_support = false;
>                         else
>                                 skl_psr_setup_su_vsc(intel_dp);
> +               } else {
> +                       /* set up vsc header for psr1 */
> +                       hsw_psr_setup_vsc(intel_dp);
>                 }
> -
>                 /*
>                  * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD.
>                  * Also mask LPSP to avoid dependency on other drivers that
> @@ -557,20 +589,37 @@ static void hsw_psr_disable(struct intel_dp *intel_dp)
>         struct drm_i915_private *dev_priv = to_i915(dev);
>
>         if (dev_priv->psr.active) {
> -               I915_WRITE(EDP_PSR_CTL,
> -                          I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
> +               if (dev_priv->psr.psr2_support)
> +                       I915_WRITE(EDP_PSR2_CTL,
> +                                  I915_READ(EDP_PSR2_CTL) &
> +                                       ~(EDP_PSR2_ENABLE |
> +                                       EDP_SU_TRACK_ENABLE));
> +               else
> +                       I915_WRITE(EDP_PSR_CTL,
> +                                  I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
>
>                 /* Wait till PSR is idle */
> -               if (intel_wait_for_register(dev_priv,
> -                                           EDP_PSR_STATUS_CTL,
> -                                           EDP_PSR_STATUS_STATE_MASK,
> -                                           0,
> -                                           2000))
> +               if (dev_priv->psr.psr2_support) {
> +                       if (intel_wait_for_register(dev_priv,
> +                                                   EDP_PSR2_STATUS_CTL,
> +                                                   EDP_PSR2_STATUS_STATE_MASK,
> +                                                   0,
> +                                                   2000))
> +                               DRM_ERROR("Timed out waiting for PSR2 Idle State\n");
> +               } else {
> +                       if (intel_wait_for_register(dev_priv,
> +                                                   EDP_PSR_STATUS_CTL,
> +                                                   EDP_PSR_STATUS_STATE_MASK,
> +                                                   0,
> +                                                   2000))
>                                 DRM_ERROR("Timed out waiting for PSR Idle State\n");
> -
> +               }
>                 dev_priv->psr.active = false;
>         } else {
> -               WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
> +               if (dev_priv->psr.psr2_support)
> +                       WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
> +               else
> +                       WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
>         }
>  }
>
> @@ -621,13 +670,24 @@ static void intel_psr_work(struct work_struct *work)
>          * and be ready for re-enable.
>          */
>         if (HAS_DDI(dev_priv)) {
> -               if (intel_wait_for_register(dev_priv,
> -                                           EDP_PSR_STATUS_CTL,
> -                                           EDP_PSR_STATUS_STATE_MASK,
> -                                           0,
> -                                           50)) {
> -                       DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
> -                       return;
> +               if (dev_priv->psr.psr2_support) {
> +                       if (intel_wait_for_register(dev_priv,
> +                                                   EDP_PSR2_STATUS_CTL,
> +                                                   EDP_PSR2_STATUS_STATE_MASK,
> +                                                   0,
> +                                                   50)) {
> +                               DRM_ERROR("Timed out waiting for PSR2 Idle for re-enable\n");
> +                               return;
> +                       }
> +               } else {
> +                       if (intel_wait_for_register(dev_priv,
> +                                                   EDP_PSR_STATUS_CTL,
> +                                                   EDP_PSR_STATUS_STATE_MASK,
> +                                                   0,
> +                                                   50)) {
> +                               DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
> +                               return;
> +                       }
>                 }
>         } else {
>                 if (intel_wait_for_register(dev_priv,
> @@ -669,11 +729,15 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
>                 return;
>
>         if (HAS_DDI(dev_priv)) {
> -               val = I915_READ(EDP_PSR_CTL);
> -
> -               WARN_ON(!(val & EDP_PSR_ENABLE));
> -
> -               I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
> +               if (dev_priv->psr.psr2_support) {
> +                       val = I915_READ(EDP_PSR2_CTL);
> +                       WARN_ON(!(val & EDP_PSR2_ENABLE));
> +                       I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
> +               } else {
> +                       val = I915_READ(EDP_PSR_CTL);
> +                       WARN_ON(!(val & EDP_PSR_ENABLE));
> +                       I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
> +               }
>         } else {
>                 val = I915_READ(VLV_PSRCTL(pipe));
>
> --
> 1.9.1
>
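For reference, here is a minimal sketch of the naming proposed in the review: the
platform entry point keeps its hsw_ prefix while the two helpers take an intel_
prefix. The dispatch logic is the same as in the patch; the helper bodies are
elided (they would carry over unchanged), and intel_enable_source_psr1 /
intel_enable_source_psr2 are only the reviewer's suggestion, not necessarily what
a later revision of the patch ended up using.

/*
 * Sketch only: reviewer's suggested naming, same dispatch as the patch.
 * The helper bodies are omitted; they would be the EDP_PSR_CTL and
 * EDP_PSR2_CTL programming from hsw_enable_source_psr1()/_psr2() above.
 */
static void intel_enable_source_psr1(struct intel_dp *intel_dp)
{
        /* program EDP_PSR_CTL as in the patch */
}

static void intel_enable_source_psr2(struct intel_dp *intel_dp)
{
        /* program EDP_PSR2_CTL as in the patch */
}

static void hsw_psr_enable_source(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);

        /* PSR1 and PSR2 are mutually exclusive: enable exactly one of them. */
        if (dev_priv->psr.psr2_support)
                intel_enable_source_psr2(intel_dp);
        else
                intel_enable_source_psr1(intel_dp);
}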
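The other fix described in the commit message, reading the PSR2 idle state from
the PSR2 status register rather than from SRD_STATUS, appears twice in the patch
as near-identical if/else blocks in hsw_psr_disable() and intel_psr_work(). A
hypothetical helper like the one below (not part of the patch) illustrates the
register/mask selection in one place, reusing the EDP_PSR2_STATUS_CTL and
EDP_PSR2_STATUS_STATE_MASK definitions the patch adds to i915_reg.h.

/*
 * Hypothetical helper, not part of this patch: wait for the idle state in
 * the status register that matches the active PSR mode. PSR2 idle must be
 * polled from EDP_PSR2_STATUS_CTL; polling SRD_STATUS (EDP_PSR_STATUS_CTL)
 * instead is what caused the blank screen described in the commit message.
 */
static int psr_wait_for_idle(struct drm_i915_private *dev_priv,
                             unsigned int timeout_ms)
{
        i915_reg_t reg = dev_priv->psr.psr2_support ?
                         EDP_PSR2_STATUS_CTL : EDP_PSR_STATUS_CTL;
        uint32_t mask = dev_priv->psr.psr2_support ?
                        EDP_PSR2_STATUS_STATE_MASK : EDP_PSR_STATUS_STATE_MASK;

        return intel_wait_for_register(dev_priv, reg, mask, 0, timeout_ms);
}

With such a helper, hsw_psr_disable() would call psr_wait_for_idle(dev_priv, 2000)
and intel_psr_work() would call psr_wait_for_idle(dev_priv, 50), keeping the
mode-specific error message at the call site.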