@@ -4939,19 +4939,11 @@ i915_max_freq_get(void *data, u64 *val)
{
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
- ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
- if (ret)
- return ret;
-
- *val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ *val = intel_gpu_freq_limit_get(dev_priv, MAX_FREQ);
return 0;
}
@@ -4961,40 +4953,13 @@ i915_max_freq_set(void *data, u64 val)
{
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 hw_max, hw_min;
- int ret;
if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
- ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
- if (ret)
- return ret;
-
- /*
- * Turbo will still be enabled, but won't go above the set value.
- */
- val = intel_freq_opcode(dev_priv, val);
-
- hw_max = dev_priv->rps.max_freq;
- hw_min = dev_priv->rps.min_freq;
-
- if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
- mutex_unlock(&dev_priv->rps.hw_lock);
- return -EINVAL;
- }
-
- dev_priv->rps.max_freq_softlimit = val;
-
- intel_set_rps(dev_priv, val);
-
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- return 0;
+ return intel_gpu_freq_limit_set(dev_priv, MAX_FREQ, val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
@@ -5006,19 +4971,11 @@ i915_min_freq_get(void *data, u64 *val)
{
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
- ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
- if (ret)
- return ret;
-
- *val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ *val = intel_gpu_freq_limit_get(dev_priv, MIN_FREQ);
return 0;
}
@@ -5028,42 +4985,13 @@ i915_min_freq_set(void *data, u64 val)
{
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 hw_max, hw_min;
- int ret;
if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
- ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
- if (ret)
- return ret;
-
- /*
- * Turbo will still be enabled, but won't go below the set value.
- */
- val = intel_freq_opcode(dev_priv, val);
-
- hw_max = dev_priv->rps.max_freq;
- hw_min = dev_priv->rps.min_freq;
-
- if (val < hw_min || val > hw_max ||
- val > dev_priv->rps.max_freq_softlimit ||
- val < dev_priv->rps.idle_freq) {
- mutex_unlock(&dev_priv->rps.hw_lock);
- return -EINVAL;
- }
-
- dev_priv->rps.min_freq_softlimit = val;
-
- intel_set_rps(dev_priv, val);
-
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- return 0;
+ return intel_gpu_freq_limit_set(dev_priv, MIN_FREQ, val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
@@ -5075,19 +5003,11 @@ i915_idle_freq_get(void *data, u64 *val)
{
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
- ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
- if (ret)
- return ret;
-
- *val = intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ *val = intel_gpu_freq_limit_get(dev_priv, IDLE_FREQ);
return 0;
}
@@ -5097,37 +5017,13 @@ i915_idle_freq_set(void *data, u64 val)
{
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 hw_max, hw_min;
- int ret;
if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
DRM_DEBUG_DRIVER("Manually setting idle freq to %llu\n", val);
- ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
- if (ret)
- return ret;
-
- val = intel_freq_opcode(dev_priv, val);
-
- hw_max = dev_priv->rps.max_freq;
- hw_min = dev_priv->rps.min_freq;
-
- if (val < hw_min || val > hw_max || val > dev_priv->rps.min_freq_softlimit) {
- mutex_unlock(&dev_priv->rps.hw_lock);
- return -EINVAL;
- }
-
- dev_priv->rps.idle_freq = val;
-
- intel_set_rps(dev_priv, val);
-
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- return 0;
+ return intel_gpu_freq_limit_set(dev_priv, IDLE_FREQ, val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_idle_freq_fops,
@@ -1123,6 +1123,12 @@ struct intel_rps_ei {
u32 media_c0;
};
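+/* Selects which RPS frequency limit is read or written */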
+enum intel_rps_freq_limit {
+ IDLE_FREQ,
+ MIN_FREQ,
+ MAX_FREQ
+};
+
struct intel_gen6_power_mgmt {
/*
* work, interrupts_enabled and pm_iir are protected by
@@ -3654,6 +3660,10 @@ void vlv_phy_reset_lanes(struct intel_encoder *encoder);
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
+int intel_gpu_freq_limit_get(struct drm_i915_private *dev_priv,
+ enum intel_rps_freq_limit freq);
+int intel_gpu_freq_limit_set(struct drm_i915_private *dev_priv,
+ enum intel_rps_freq_limit freq, int val);
#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
@@ -344,11 +344,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
- mutex_lock(&dev_priv->rps.hw_lock);
- ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ ret = intel_gpu_freq_limit_get(dev_priv, MAX_FREQ);
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
@@ -367,40 +363,9 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
- intel_runtime_pm_get(dev_priv);
-
- mutex_lock(&dev_priv->rps.hw_lock);
-
- val = intel_freq_opcode(dev_priv, val);
-
- if (val < dev_priv->rps.min_freq ||
- val > dev_priv->rps.max_freq ||
- val < dev_priv->rps.min_freq_softlimit) {
- mutex_unlock(&dev_priv->rps.hw_lock);
- intel_runtime_pm_put(dev_priv);
- return -EINVAL;
- }
-
- if (val > dev_priv->rps.rp0_freq)
- DRM_DEBUG("User requested overclocking to %d\n",
- intel_gpu_freq(dev_priv, val));
-
- dev_priv->rps.max_freq_softlimit = val;
-
- val = clamp_t(int, dev_priv->rps.cur_freq,
- dev_priv->rps.min_freq_softlimit,
- dev_priv->rps.max_freq_softlimit);
-
- /* We still need *_set_rps to process the new max_delay and
- * update the interrupt limits and PMINTRMSK even though
- * frequency request may be unchanged. */
- intel_set_rps(dev_priv, val);
-
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- intel_runtime_pm_put(dev_priv);
+ ret = intel_gpu_freq_limit_set(dev_priv, MAX_FREQ, val);
+ if (ret)
+ return ret;
return count;
}
@@ -412,11 +377,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
- mutex_lock(&dev_priv->rps.hw_lock);
- ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ ret = intel_gpu_freq_limit_get(dev_priv, MIN_FREQ);
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
@@ -435,37 +396,9 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
- intel_runtime_pm_get(dev_priv);
-
- mutex_lock(&dev_priv->rps.hw_lock);
-
- val = intel_freq_opcode(dev_priv, val);
-
- if (val < dev_priv->rps.min_freq ||
- val > dev_priv->rps.max_freq ||
- val > dev_priv->rps.max_freq_softlimit ||
- val < dev_priv->rps.idle_freq) {
- mutex_unlock(&dev_priv->rps.hw_lock);
- intel_runtime_pm_put(dev_priv);
- return -EINVAL;
- }
-
- dev_priv->rps.min_freq_softlimit = val;
-
- val = clamp_t(int, dev_priv->rps.cur_freq,
- dev_priv->rps.min_freq_softlimit,
- dev_priv->rps.max_freq_softlimit);
-
- /* We still need *_set_rps to process the new min_delay and
- * update the interrupt limits and PMINTRMSK even though
- * frequency request may be unchanged. */
- intel_set_rps(dev_priv, val);
-
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- intel_runtime_pm_put(dev_priv);
+ ret = intel_gpu_freq_limit_set(dev_priv, MIN_FREQ, val);
+ if (ret)
+ return ret;
return count;
@@ -478,14 +411,11 @@ static ssize_t gt_idle_freq_mhz_show(struct device *kdev, struct device_attribut
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
- mutex_lock(&dev_priv->rps.hw_lock);
- ret = intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ ret = intel_gpu_freq_limit_get(dev_priv, IDLE_FREQ);
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
+
static ssize_t gt_idle_freq_mhz_store(struct device *kdev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -500,36 +430,9 @@ static ssize_t gt_idle_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
- intel_runtime_pm_get(dev_priv);
-
- mutex_lock(&dev_priv->rps.hw_lock);
-
- val = intel_freq_opcode(dev_priv, val);
-
- if (val < dev_priv->rps.min_freq ||
- val > dev_priv->rps.max_freq ||
- val > dev_priv->rps.min_freq_softlimit) {
- mutex_unlock(&dev_priv->rps.hw_lock);
- intel_runtime_pm_put(dev_priv);
- return -EINVAL;
- }
-
- dev_priv->rps.idle_freq = val;
-
- val = clamp_t(int, dev_priv->rps.cur_freq,
- dev_priv->rps.min_freq_softlimit,
- dev_priv->rps.max_freq_softlimit);
-
- /* We still need *_set_rps to process the new min_delay and
- * update the interrupt limits and PMINTRMSK even though
- * frequency request may be unchanged. */
- intel_set_rps(dev_priv, val);
-
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- intel_runtime_pm_put(dev_priv);
+ ret = intel_gpu_freq_limit_set(dev_priv, IDLE_FREQ, val);
+ if (ret)
+ return ret;
return count;
}
@@ -7530,6 +7530,92 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}
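+/*
+ * Read one of the RPS frequency limits (idle freq, min or max softlimit)
+ * and return it in MHz. Flushes pending RPS resume work and holds
+ * rps.hw_lock for the read.
+ */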
+int intel_gpu_freq_limit_get(struct drm_i915_private *dev_priv,
+ enum intel_rps_freq_limit freq)
+{
+ int ret = 0;
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ switch (freq) {
+ case IDLE_FREQ:
+ ret = intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq);
+ break;
+ case MIN_FREQ:
+ ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
+ break;
+ case MAX_FREQ:
+ ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
+ break;
+ }
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ return ret;
+}
+
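+/*
+ * Update one of the RPS frequency limits. @val is given in MHz, converted
+ * to the hardware opcode and validated against the hardware range and the
+ * other soft limits before being applied. The current frequency is then
+ * re-requested, clamped to the new soft limits, so that interrupt limits
+ * and PMINTRMSK are updated. Returns 0 on success or -EINVAL.
+ */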
+int intel_gpu_freq_limit_set(struct drm_i915_private *dev_priv,
+ enum intel_rps_freq_limit freq, int val)
+{
+ int ret = 0;
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ intel_runtime_pm_get(dev_priv);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ val = intel_freq_opcode(dev_priv, val);
+
+ /* Reject values outside the hardware frequency range */
+ if (val < dev_priv->rps.min_freq ||
+ val > dev_priv->rps.max_freq) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (freq) {
+ case IDLE_FREQ:
+ if (val > dev_priv->rps.min_freq_softlimit) {
+ ret = -EINVAL;
+ goto out;
+ }
+ dev_priv->rps.idle_freq = val;
+ break;
+
+ case MIN_FREQ:
+ if (val > dev_priv->rps.max_freq_softlimit ||
+ val < dev_priv->rps.idle_freq) {
+ ret = -EINVAL;
+ goto out;
+ }
+ dev_priv->rps.min_freq_softlimit = val;
+ break;
+
+ case MAX_FREQ:
+ if (val < dev_priv->rps.min_freq_softlimit) {
+ ret = -EINVAL;
+ goto out;
+ }
+ dev_priv->rps.max_freq_softlimit = val;
+ break;
+ }
+
+ val = clamp_t(int, dev_priv->rps.cur_freq,
+ dev_priv->rps.min_freq_softlimit,
+ dev_priv->rps.max_freq_softlimit);
+
+ /* We still need *_set_rps to process the new limits and
+ * update the interrupt limits and PMINTRMSK even though
+ * the frequency request may be unchanged. */
+ intel_set_rps(dev_priv, val);
+
+out:
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ intel_runtime_pm_put(dev_priv);
+ return ret;
+}
+
struct request_boost {
struct work_struct work;
struct drm_i915_gem_request *req;
No functional changes other than using a regular mutex_lock instead of an
interruptible one for debugfs.

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c | 116 ++--------------------------------
 drivers/gpu/drm/i915/i915_drv.h     |  10 +++
 drivers/gpu/drm/i915/i915_sysfs.c   | 123 ++++--------------------------------
 drivers/gpu/drm/i915/intel_pm.c     |  86 +++++++++++++++++++++++++
 4 files changed, 115 insertions(+), 220 deletions(-)