@@ -1572,6 +1572,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
+ spin_lock_init(&dev_priv->mmio_flip_lock);
mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->modeset_restore_lock);
@@ -1367,6 +1367,9 @@ struct drm_i915_private {
/* protects the irq masks */
spinlock_t irq_lock;
+ /* protects the mmio flip data */
+ spinlock_t mmio_flip_lock;
+
bool display_irqs_enabled;
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
@@ -2036,6 +2039,7 @@ struct i915_params {
bool reset;
bool disable_display;
bool disable_vtd_wa;
+ bool use_mmio_flip;
};
extern struct i915_params i915 __read_mostly;
@@ -2231,6 +2235,8 @@ bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
+int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno);
+
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
return unlikely(atomic_read(&error->reset_counter)
@@ -2601,6 +2607,8 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+void intel_notify_mmio_flip(struct intel_engine_cs *ring);
+
/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
@@ -1096,7 +1096,7 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
* Compare seqno against outstanding lazy request. Emit a request if they are
* equal.
*/
-static int
+int
i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
{
int ret;
@@ -1218,6 +1218,9 @@ static void notify_ring(struct drm_device *dev,
trace_i915_gem_request_complete(ring);
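+ /* Check for and complete any pending MMIO flips whose seqno has now passed */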
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ intel_notify_mmio_flip(ring);
+
wake_up_all(&ring->irq_queue);
i915_queue_hangcheck(dev);
}
@@ -48,6 +48,7 @@ struct i915_params i915 __read_mostly = {
.disable_display = 0,
.enable_cmd_parser = 1,
.disable_vtd_wa = 0,
+ .use_mmio_flip = 0,
};
module_param_named(modeset, i915.modeset, int, 0400);
@@ -156,3 +157,6 @@ MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)"
module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
MODULE_PARM_DESC(enable_cmd_parser,
"Enable command parsing (1=enabled [default], 0=disabled)");
+
+module_param_named(use_mmio_flip, i915.use_mmio_flip, bool, 0600);
+MODULE_PARM_DESC(use_mmio_flip, "use MMIO flips (default: false)");
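+/* Typically set at load time, e.g. "modprobe i915 use_mmio_flip=1", or
+ * toggled afterwards via /sys/module/i915/parameters/use_mmio_flip.
+ */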
@@ -9183,6 +9183,143 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
return 0;
}
+static bool intel_use_mmio_flip(struct drm_device *dev)
+{
+ /* If the module parameter is disabled, use CS flips.
+ * Otherwise, use MMIO flips starting from Gen5.
+ * MMIO flips are not used on older platforms: the lack of a flip
+ * done interrupt there forces us to use CS flips, and those
+ * platforms derive flip completion from the flip_pending status
+ * bits and vblank irqs, a mechanism that MMIO flips would disrupt.
+ */
+
+ if (i915.use_mmio_flip == 0)
+ return false;
+
+ return INTEL_INFO(dev)->gen >= 5;
+}
+
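+/*
+ * Perform the flip itself with MMIO writes: mark the flip as active,
+ * update the plane's tiling bit in DSPCNTR (gen4+) and latch the new
+ * base address by writing DSPSURF. No command streamer work is emitted.
+ */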
+static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_framebuffer *intel_fb =
+ to_intel_framebuffer(intel_crtc->base.primary->fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ u32 dspcntr;
+ u32 reg;
+
+ intel_mark_page_flip_active(intel_crtc);
+
+ reg = DSPCNTR(intel_crtc->plane);
+ dspcntr = I915_READ(reg);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
+ else
+ dspcntr &= ~DISPPLANE_TILED;
+ }
+ I915_WRITE(reg, dspcntr);
+
+ I915_WRITE(DSPSURF(intel_crtc->plane),
+ intel_crtc->unpin_work->gtt_offset);
+ POSTING_READ(DSPSURF(intel_crtc->plane));
+}
+
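+/*
+ * Decide whether the flip must wait for rendering to the object.
+ * Returns 0 when the flip can be performed immediately, a negative
+ * error code from i915_gem_check_olr(), or 1 when the flip has to be
+ * postponed until obj->last_write_seqno passes; in the latter case a
+ * reference on the ring's user irq is held until the flip is issued.
+ */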
+static int intel_postpone_flip(struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ if (!obj->ring)
+ return 0;
+
+ if (i915_seqno_passed(obj->ring->get_seqno(obj->ring, true),
+ obj->last_write_seqno))
+ return 0;
+
+ ret = i915_gem_check_olr(obj->ring, obj->last_write_seqno);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(!obj->ring->irq_get(obj->ring)))
+ return 0;
+
+ return 1;
+}
+
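+/*
+ * Called from the ring interrupt handler: for every crtc with a pending
+ * MMIO flip on this ring whose seqno has passed, perform the flip and
+ * drop the irq reference taken in intel_postpone_flip().
+ */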
+void intel_notify_mmio_flip(struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct intel_crtc *intel_crtc;
+ unsigned long irq_flags;
+ u32 seqno;
+
+ seqno = ring->get_seqno(ring, false);
+
+ spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
+ for_each_intel_crtc(ring->dev, intel_crtc) {
+ struct intel_mmio_flip *mmio_flip;
+
+ mmio_flip = &intel_crtc->mmio_flip;
+
+ if (mmio_flip->seqno == 0)
+ continue;
+ if (ring->id != mmio_flip->ring_id)
+ continue;
+
+ if (i915_seqno_passed(seqno, mmio_flip->seqno)) {
+ intel_do_mmio_flip(intel_crtc);
+ mmio_flip->seqno = 0;
+ ring->irq_put(ring);
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+}
+
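+/*
+ * queue_flip hook for MMIO flips: flip immediately if rendering to the
+ * object has already completed, otherwise record the seqno and ring
+ * under mmio_flip_lock so intel_notify_mmio_flip() can finish the flip
+ * once rendering completes.
+ */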
+static int intel_queue_mmio_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
+ uint32_t flags)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long irq_flags;
+ int ret;
+
+ if (WARN_ON(intel_crtc->mmio_flip.seqno)) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ ret = intel_postpone_flip(obj);
+ if (ret < 0) {
+ goto err;
+ } else if (ret == 0) {
+ intel_do_mmio_flip(intel_crtc);
+ return 0;
+ }
+
+ spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
+ intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
+ intel_crtc->mmio_flip.ring_id = obj->ring->id;
+ spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+
+ /* Double check in case the irq fired before the mmio flip data
+ * was ready
+ */
+ intel_notify_mmio_flip(obj->ring);
+ return 0;
+err:
+ return ret;
+}
+
static int intel_default_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -11515,6 +11652,9 @@ static void intel_init_display(struct drm_device *dev)
break;
}
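+ /* Override the ring-based queue_flip hook when MMIO flips are in use */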
+ if (intel_use_mmio_flip(dev))
+ dev_priv->display.queue_flip = intel_queue_mmio_flip;
+
intel_panel_init_backlight_funcs(dev);
}
@@ -358,6 +358,11 @@ struct intel_pipe_wm {
bool sprites_scaled;
};
+struct intel_mmio_flip {
+ u32 seqno; /* last_write_seqno the flip waits for; 0 means no flip pending */
+ u32 ring_id; /* id of the ring that seqno belongs to */
+};
+
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
@@ -411,6 +416,7 @@ struct intel_crtc {
wait_queue_head_t vbl_wait;
int scanline_offset;
+ struct intel_mmio_flip mmio_flip;
};
struct intel_plane_wm_parameters {