@@ -1823,8 +1823,6 @@ int i915_driver_unload(struct drm_device *dev)
cancel_work_sync(&dev_priv->gpu_error.work);
i915_destroy_error_state(dev);
- cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
-
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
@@ -849,6 +849,9 @@ static int i915_runtime_suspend(struct device *device)
DRM_DEBUG_KMS("Suspending device\n");
+ if (HAS_PC8(dev))
+ __hsw_do_enable_pc8(dev_priv);
+
i915_gem_release_all_mmaps(dev_priv);
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
@@ -863,6 +866,7 @@ static int i915_runtime_suspend(struct device *device)
*/
intel_opregion_notify_adapter(dev, PCI_D1);
+ DRM_DEBUG_KMS("Device suspended\n");
return 0;
}
@@ -879,6 +883,10 @@ static int i915_runtime_resume(struct device *device)
intel_opregion_notify_adapter(dev, PCI_D0);
dev_priv->pm.suspended = false;
+ if (HAS_PC8(dev))
+ __hsw_do_disable_pc8(dev_priv);
+
+ DRM_DEBUG_KMS("Device resumed\n");
return 0;
}
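
For context, a minimal sketch (not part of this patch) of how a hardware-touching path interacts with the callbacks above under the new model. The helper names intel_runtime_pm_get()/intel_runtime_pm_put() are the ones the driver already uses; example_touch_hw() itself is purely illustrative.

/*
 * Illustrative only: taking a runtime PM reference wakes the device
 * (triggering i915_runtime_resume() -> __hsw_do_disable_pc8() if it was
 * suspended), and dropping the last reference allows runtime suspend,
 * which now enters PC8 directly via __hsw_do_enable_pc8().
 */
static void example_touch_hw(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);	/* device awake, PC8 disabled */

	/* ... safe to read/write registers here ... */

	intel_runtime_pm_put(dev_priv);	/* may runtime-suspend -> PC8 */
}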
@@ -1339,6 +1339,10 @@ struct ilk_wm_values {
/*
* This struct tracks the state needed for the Package C8+ feature.
*
+ * TODO: we're merging the Package C8+ feature with the runtime PM support. To
+ * avoid having to update the documentation with each patch in the series, we'll
+ * do a single final update at the end.
+ *
* Package states C8 and deeper are really deep PC states that can only be
* reached when all the devices on the system allow it, so even if the graphics
* device allows PC8+, it doesn't mean the system will actually get to these
@@ -1392,7 +1396,6 @@ struct i915_package_c8 {
bool enabled;
int disable_count;
struct mutex lock;
- struct delayed_work enable_work;
struct {
uint32_t deimr;
@@ -2095,8 +2098,6 @@ struct i915_params {
unsigned int preliminary_hw_support;
int disable_power_well;
int enable_ips;
- int enable_pc8;
- int pc8_timeout;
int invert_brightness;
int enable_cmd_parser;
/* leave bools at the end to not create holes */
@@ -42,8 +42,6 @@ struct i915_params i915 __read_mostly = {
.disable_power_well = 1,
.enable_ips = 1,
.fastboot = 0,
- .enable_pc8 = 1,
- .pc8_timeout = 5000,
.prefault_disable = 0,
.reset = true,
.invert_brightness = 0,
@@ -135,14 +133,6 @@ module_param_named(fastboot, i915.fastboot, bool, 0600);
MODULE_PARM_DESC(fastboot,
"Try to skip unnecessary mode sets at boot time (default: false)");
-module_param_named(enable_pc8, i915.enable_pc8, int, 0600);
-MODULE_PARM_DESC(enable_pc8,
- "Enable support for low power package C states (PC8+) (default: true)");
-
-module_param_named(pc8_timeout, i915.pc8_timeout, int, 0600);
-MODULE_PARM_DESC(pc8_timeout,
- "Number of msecs of idleness required to enter PC8+ (default: 5000)");
-
module_param_named(prefault_disable, i915.prefault_disable, bool, 0600);
MODULE_PARM_DESC(prefault_disable,
"Disable page prefaulting for pread/pwrite/reloc (default:false). "
@@ -6958,6 +6958,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
uint32_t val;
+ unsigned long irqflags;
val = I915_READ(LCPLL_CTL);
@@ -6965,9 +6966,22 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
return;
- /* Make sure we're not on PC8 state before disabling PC8, otherwise
- * we'll hang the machine! */
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ /*
+ * Make sure we're not in the PC8 state before disabling PC8, otherwise
+ * we'll hang the machine. To keep us out of PC8, just enable force_wake.
+ *
+ * The other problem is that hsw_restore_lcpll() is called as part of
+ * the runtime PM resume sequence, so we can't just call
+ * gen6_gt_force_wake_get() because that function calls
+ * intel_runtime_pm_get(), and we can't change the runtime PM refcount
+ * while we are in the resume sequence. To solve this, we have to call
+ * special forcewake code that doesn't touch runtime PM and doesn't
+ * schedule the forcewake delayed work.
+ */
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ if (dev_priv->uncore.forcewake_count++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -7001,14 +7015,20 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
DRM_ERROR("Switching back to LCPLL failed\n");
}
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ /* See the big comment above. */
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ if (--dev_priv->uncore.forcewake_count == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
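
The open-coded forcewake sequence above can be read as a get/put pair; the helpers below are hypothetical (they do not exist in the driver and are not added by this patch), and simply restate what the hunk does under dev_priv->uncore.lock, without touching runtime PM or the forcewake delayed work.

/*
 * Hypothetical helpers restating the open-coded sequence above: take or
 * release forcewake directly through the uncore vfuncs, under the uncore
 * spinlock. The gets and puts must stay balanced, exactly as in
 * hsw_restore_lcpll() above.
 */
static void hsw_raw_forcewake_get(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void hsw_raw_forcewake_put(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}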
-static void __hsw_do_enable_pc8(struct drm_i915_private *dev_priv)
+void __hsw_do_enable_pc8(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
uint32_t val;
+ WARN_ON(!HAS_PC8(dev));
+
DRM_DEBUG_KMS("Enabling package C8+\n");
dev_priv->pc8.enabled = true;
@@ -7024,22 +7044,6 @@ static void __hsw_do_enable_pc8(struct drm_i915_private *dev_priv)
hsw_disable_lcpll(dev_priv, true, true);
}
-void hsw_enable_pc8_work(struct work_struct *__work)
-{
- struct drm_i915_private *dev_priv =
- container_of(to_delayed_work(__work), struct drm_i915_private,
- pc8.enable_work);
- struct drm_device *dev = dev_priv->dev;
-
- WARN_ON(!HAS_PC8(dev));
-
- if (dev_priv->pc8.enabled)
- return;
-
- __hsw_do_enable_pc8(dev_priv);
- intel_runtime_pm_put(dev_priv);
-}
-
static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
{
WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
@@ -7050,15 +7054,16 @@ static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
if (dev_priv->pc8.disable_count != 0)
return;
- schedule_delayed_work(&dev_priv->pc8.enable_work,
- msecs_to_jiffies(i915.pc8_timeout));
+ intel_runtime_pm_put(dev_priv);
}
-static void __hsw_do_disable_package_c8(struct drm_i915_private *dev_priv)
+void __hsw_do_disable_pc8(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
uint32_t val;
+ WARN_ON(!HAS_PC8(dev));
+
DRM_DEBUG_KMS("Disabling package C8+\n");
hsw_restore_lcpll(dev_priv);
@@ -7081,8 +7086,6 @@ static void __hsw_do_disable_package_c8(struct drm_i915_private *dev_priv)
static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
-
WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
WARN(dev_priv->pc8.disable_count < 0,
"pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
@@ -7091,14 +7094,7 @@ static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
if (dev_priv->pc8.disable_count != 1)
return;
- WARN_ON(!HAS_PC8(dev));
-
- cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
- if (!dev_priv->pc8.enabled)
- return;
-
intel_runtime_pm_get(dev_priv);
- __hsw_do_disable_package_c8(dev_priv);
}
void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
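
A sketch of the resulting control flow, assuming existing hsw_disable_package_c8()/hsw_enable_package_c8() call sites stay as they are: disable_count still gates the transition, but PC8 entry/exit is now driven by the runtime PM reference taken or dropped here instead of the removed pc8.enable_work timer. The caller example_needs_pc8_disabled() is illustrative only.

/*
 * Illustrative caller, not part of this patch: code that must keep the
 * device out of PC8 still brackets its work with the existing helpers.
 * With this patch, the first disable takes a runtime PM reference (so the
 * device resumes and __hsw_do_disable_pc8() runs), and the last enable
 * drops it (so runtime suspend can enter PC8 again via
 * __hsw_do_enable_pc8()).
 */
static void example_needs_pc8_disabled(struct drm_i915_private *dev_priv)
{
	hsw_disable_package_c8(dev_priv);	/* disable_count++, runtime PM get */

	/* ... work that requires PC8 to be off ... */

	hsw_enable_package_c8(dev_priv);	/* disable_count--, runtime PM put */
}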
@@ -7156,9 +7152,6 @@ static void hsw_update_package_c8(struct drm_device *dev)
if (!HAS_PC8(dev_priv->dev))
return;
- if (!i915.enable_pc8)
- return;
-
mutex_lock(&dev_priv->pc8.lock);
allow = hsw_can_enable_package_c8(dev_priv);
@@ -725,7 +725,8 @@ unsigned long intel_gen4_compute_page_offset(int *x, int *y,
unsigned int bpp,
unsigned int pitch);
void intel_display_handle_reset(struct drm_device *dev);
-void hsw_enable_pc8_work(struct work_struct *__work);
+void __hsw_do_enable_pc8(struct drm_i915_private *dev_priv);
+void __hsw_do_disable_pc8(struct drm_i915_private *dev_priv);
void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
@@ -6161,7 +6161,6 @@ void intel_pm_setup(struct drm_device *dev)
dev_priv->pc8.irqs_disabled = false;
dev_priv->pc8.enabled = false;
dev_priv->pc8.disable_count = 1; /* requirements_met */
- INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
}