@@ -6562,6 +6562,49 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
 		icl_wa_scalerclkgating(dev_priv, pipe, false);
 }
 
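+/*
+ * Clear the double buffered async flip bit on every plane taking part
+ * in this crtc's update, rewrite PLANE_SURF to arm the change, and
+ * wait for a vblank so it is latched before the upcoming synchronous
+ * plane update is programmed.
+ */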
+static void skl_disable_async_flip_wa(struct intel_atomic_state *state,
+				       struct intel_crtc *crtc,
+				       const struct intel_crtc_state *new_crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct intel_plane *plane;
+	struct intel_plane_state *new_plane_state;
+	int i;
+
+	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
+		u32 update_mask = new_crtc_state->update_planes;
+		u32 plane_ctl, surf_addr;
+		enum plane_id plane_id;
+		unsigned long irqflags;
+		enum pipe pipe;
+
+		if (crtc->pipe != plane->pipe ||
+		    !(update_mask & BIT(plane->id)))
+			continue;
+
+		plane_id = plane->id;
+		pipe = plane->pipe;
+
+		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+		plane_ctl = intel_de_read_fw(dev_priv, PLANE_CTL(pipe, plane_id));
+		surf_addr = intel_de_read_fw(dev_priv, PLANE_SURF(pipe, plane_id));
+
+		plane_ctl &= ~PLANE_CTL_ASYNC_FLIP;
+
+		intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
+		intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), surf_addr);
+		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	}
+
+	intel_wait_for_vblank(dev_priv, crtc->pipe);
+}
+
 static void intel_pre_plane_update(struct intel_atomic_state *state,
 				   struct intel_crtc *crtc)
 {
@@ -6647,6 +6690,15 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
 	 */
 	if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
+	/*
+	 * WA for platforms where the async address update enable bit
+	 * is double buffered and only latched at the start of vblank.
+	 */
+	if (old_crtc_state->uapi.async_flip &&
+	    !new_crtc_state->uapi.async_flip &&
+	    IS_GEN_RANGE(dev_priv, 9, 10))
+		skl_disable_async_flip_wa(state, crtc, new_crtc_state);
 }
 
 static void intel_crtc_disable_planes(struct intel_atomic_state *state,