@@ -670,6 +670,21 @@ struct i915_fbc {
} no_fbc_reason;
};
+/*
+ * Idle period (in milliseconds) the system must see before
+ * DRRS is enabled.
+ */
+#define DRRS_IDLENESS_TIME 2000 /* in millisecs */
+
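+/* DRRS idleness-detection bookkeeping, initialized by
+ * intel_init_drrs_idleness_detection().
+ */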
+struct i915_drrs {
+ struct intel_connector *connector;
+ struct intel_dp *dp;
+ struct intel_drrs_work {
+ struct delayed_work work;
+ struct drm_crtc *crtc;
+ int interval;
+ } *drrs_work;
+};
+
struct i915_psr {
bool sink_support;
bool source_ok;
@@ -1347,6 +1362,7 @@ typedef struct drm_i915_private {
int num_plane;
struct i915_fbc fbc;
+ struct i915_drrs drrs;
struct intel_opregion opregion;
struct intel_vbt_data vbt;
@@ -2365,6 +2381,9 @@ extern void intel_modeset_setup_hw_state(struct drm_device *dev,
extern void i915_redisable_vga(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
+extern void intel_init_drrs_idleness_detection(struct drm_device *dev,
+ struct intel_connector *connector, struct intel_dp *dp);
+extern void intel_update_drrs(struct drm_device *dev, bool update);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
@@ -2372,6 +2372,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
}
intel_update_fbc(dev);
+ intel_update_drrs(dev, true);
intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
@@ -3520,6 +3521,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
+ intel_update_drrs(dev, true);
mutex_unlock(&dev->struct_mutex);
for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -3561,6 +3563,7 @@ static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
+ intel_update_drrs(dev, true);
mutex_unlock(&dev->struct_mutex);
}
@@ -3767,6 +3770,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
+ intel_update_drrs(dev, true);
mutex_unlock(&dev->struct_mutex);
}
@@ -3814,6 +3818,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
+ intel_update_drrs(dev, true);
mutex_unlock(&dev->struct_mutex);
}
@@ -7938,6 +7943,11 @@ static void intel_unpin_work_fn(struct work_struct *__work)
drm_gem_object_unreference(&work->old_fb_obj->base);
intel_update_fbc(dev);
+
+ /* Cancel the currently scheduled DRRS work and reschedule it,
+ * pushing the idleness timeout out by another interval.
+ */
+ intel_update_drrs(dev, true);
mutex_unlock(&dev->struct_mutex);
BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
@@ -8377,6 +8387,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
goto cleanup_pending;
intel_disable_fbc(dev);
+ intel_update_drrs(dev, false);
intel_mark_fb_busy(obj, NULL);
mutex_unlock(&dev->struct_mutex);
@@ -10986,6 +10997,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_disable_fbc(dev);
+ intel_update_drrs(dev, false);
+
intel_disable_gt_powersave(dev);
ironlake_teardown_rc6(dev);
@@ -3219,11 +3219,18 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
i2c_del_adapter(&intel_dp->adapter);
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
+ /* DRRS cleanup */
+ if (intel_dp->drrs_state.is_drrs_supported
+ == SEAMLESS_DRRS_SUPPORT) {
+ kfree(dev_priv->drrs.drrs_work);
+ dev_priv->drrs.drrs_work = NULL;
+ }
mutex_lock(&dev->mode_config.mutex);
ironlake_panel_vdd_off_sync(intel_dp);
mutex_unlock(&dev->mode_config.mutex);
@@ -3829,6 +3836,8 @@ intel_dp_find_drrs_lowclk(struct intel_digital_port *intel_dig_port,
dev_priv->vbt.drrs_mode == SEAMLESS_DRRS_SUPPORT) {
intel_dp_drrs_modelist_create(intel_dig_port, fixed_mode,
temp_mode);
+ intel_init_drrs_idleness_detection(dev,
+ intel_connector, intel_dp);
mutex_init(&intel_dp->drrs_state.mutex);
intel_dp->drrs_state.is_drrs_supported =
dev_priv->vbt.drrs_mode;
@@ -608,6 +608,119 @@ out_disable:
i915_gem_stolen_cleanup_compression(dev);
}
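+/* Deferred work handler: the display has been idle for the configured
+ * interval, so switch the panel to its low refresh rate.
+ */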
+static void intel_drrs_work_fn(struct work_struct *__work)
+{
+ struct intel_drrs_work *work =
+ container_of(to_delayed_work(__work),
+ struct intel_drrs_work, work);
+ struct drm_device *dev = work->crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_dp_set_drrs_state(dev,
+ dev_priv->drrs.dp->drrs_state.refresh_rate_array[DRRS_LOW_RR]);
+}
+
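+/* Cancel any DRRS enable work still pending on the workqueue. */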
+static void intel_cancel_drrs_work(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->drrs.drrs_work == NULL)
+ return;
+
+ DRM_DEBUG_KMS("cancelling pending DRRS enable\n");
+
+ cancel_delayed_work_sync(&dev_priv->drrs.drrs_work->work);
+}
+
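+/* Arm the idleness timer on @crtc: unless the work is cancelled first,
+ * the handler switches the panel to its low refresh rate once
+ * DRRS_IDLENESS_TIME has elapsed.
+ */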
+static void intel_enable_drrs(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->drrs.dp->drrs_state.drrs_refresh_rate_type
+ != DRRS_LOW_RR) {
+ dev_priv->drrs.drrs_work->crtc = crtc;
+
+ /* Delay the actual enabling to let page flipping cease and the
+ * display settle before starting DRRS
+ */
+ schedule_delayed_work(&dev_priv->drrs.drrs_work->work,
+ msecs_to_jiffies(dev_priv->drrs.drrs_work->interval));
+ }
+}
+
+/**
+ * intel_update_drrs - enable/disable DRRS as needed
+ * @dev: the drm_device
+ * @update: if true, cancel the current work and re-arm idleness detection;
+ *          if false, cancel the current work and disable DRRS
+ */
+void intel_update_drrs(struct drm_device *dev, bool update)
+{
+ struct drm_crtc *crtc = NULL, *tmp_crtc;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* If drrs.connector is NULL, intel_init_drrs_idleness_detection()
+ * was never called, which means DRRS is not supported.
+ */
+ if (dev_priv->drrs.connector == NULL) {
+ DRM_INFO("DRRS is not supported.\n");
+ return;
+ }
+
+ intel_cancel_drrs_work(dev_priv);
+
+ list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
+ if (tmp_crtc != NULL && intel_crtc_active(tmp_crtc) &&
+ to_intel_crtc(tmp_crtc)->primary_enabled) {
+ if (crtc) {
+ DRM_DEBUG_KMS(
+ "more than one pipe active, disabling DRRS\n");
+ update = false;
+ break;
+ }
+ crtc = tmp_crtc;
+ }
+ }
+
+ if (crtc == NULL) {
+ DRM_INFO("DRRS: crtc not initialized\n");
+ return;
+ }
+
+ /* As part of disabling DRRS, reset the refresh rate to HIGH_RR */
+ if (dev_priv->drrs.dp->drrs_state.drrs_refresh_rate_type
+ == DRRS_LOW_RR)
+ intel_dp_set_drrs_state(dev,
+ dev_priv->drrs.dp->drrs_state.
+ refresh_rate_array[DRRS_HIGH_RR]);
+
+ /* Re-arm idleness detection */
+ if (update)
+ intel_enable_drrs(crtc);
+}
+
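+/* Set up DRRS idleness detection for an eDP connector whose VBT reports
+ * seamless DRRS support: record the connector/DP and allocate the delayed
+ * work that drives the idleness timer.
+ */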
+void intel_init_drrs_idleness_detection(struct drm_device *dev,
+ struct intel_connector *connector,
+ struct intel_dp *dp)
+{
+ struct intel_drrs_work *work;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (!work) {
+ DRM_ERROR("Failed to allocate DRRS work structure\n");
+ return;
+ }
+
+ dev_priv->drrs.connector = connector;
+ dev_priv->drrs.dp = dp;
+
+ work->interval = DRRS_IDLENESS_TIME;
+ INIT_DELAYED_WORK(&work->work, intel_drrs_work_fn);
+
+ dev_priv->drrs.drrs_work = work;
+}
+
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -542,6 +542,7 @@ intel_enable_primary(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
+ intel_update_drrs(dev, true);
mutex_unlock(&dev->struct_mutex);
}
@@ -561,6 +562,8 @@ intel_disable_primary(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
if (dev_priv->fbc.plane == intel_crtc->plane)
intel_disable_fbc(dev);
+
+ intel_update_drrs(dev, false);
mutex_unlock(&dev->struct_mutex);
/*