@@ -1566,6 +1566,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
+ spin_lock_init(&dev_priv->flip_lock);
mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->modeset_restore_lock);
@@ -1436,6 +1436,12 @@ struct intel_pipe_crc {
wait_queue_head_t wq;
};
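+/* Per-pipe bookkeeping for a pending MMIO flip: the CRTC to be flipped and
+ * the seqno/ring that must be reached before the flip can be performed.
+ */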
+struct i915_flip_data {
+ struct drm_crtc *crtc;
+ u32 seqno;
+ u32 ring_id;
+};
+
typedef struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
@@ -1643,6 +1649,11 @@ typedef struct drm_i915_private {
struct i915_ums_state ums;
u32 suspend_count;
+
+ /* protects the flip_data */
+ spinlock_t flip_lock;
+
+ struct i915_flip_data flip_data[I915_MAX_PIPES];
} drm_i915_private_t;
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
@@ -2681,6 +2692,8 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+void intel_notify_mmio_flip(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index be2713f..9b2007e 100644
@@ -1062,6 +1062,8 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
static void notify_ring(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
+ intel_notify_mmio_flip(dev, ring);
+
if (ring->obj == NULL)
return;
@@ -8813,6 +8813,122 @@ err:
return ret;
}
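+/* Perform the flip right away by writing the new framebuffer to the
+ * display plane registers over MMIO.
+ */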
+static void intel_do_mmio_flip(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ intel_mark_page_flip_active(intel_crtc);
+ i9xx_update_plane(crtc, crtc->fb, 0, 0);
+}
+
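+/* Check whether the flip has to wait for rendering to the object to finish.
+ * Returns true, holding a ring irq reference, if the flip must be deferred
+ * until obj->last_write_seqno passes; returns false if it can be done now.
+ */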
+static bool intel_postpone_flip(struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ if (!obj->ring)
+ return false;
+
+ if (i915_seqno_passed(obj->ring->get_seqno(obj->ring, false),
+ obj->last_write_seqno))
+ return false;
+
+ if (obj->last_write_seqno == obj->ring->outstanding_lazy_seqno) {
+ ret = i915_add_request(obj->ring, NULL);
+ if (WARN_ON(ret))
+ return false;
+ }
+
+ if (WARN_ON(!obj->ring->irq_get(obj->ring)))
+ return false;
+
+ return true;
+}
+
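+/* Called from the ring interrupt handler (notify_ring). Completes any
+ * pending MMIO flip whose target seqno has passed on this ring and drops
+ * the irq reference taken in intel_postpone_flip().
+ */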
+void intel_notify_mmio_flip(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_flip_data *flip_data;
+ unsigned long irq_flags;
+ u32 seqno, count;
+
+ BUG_ON(!ring);
+
+ seqno = ring->get_seqno(ring, false);
+
+ spin_lock_irqsave(&dev_priv->flip_lock, irq_flags);
+
+ for (count = 0; count < I915_MAX_PIPES; count++) {
+ flip_data = &dev_priv->flip_data[count];
+ if ((flip_data->seqno != 0) &&
+ (ring->id == flip_data->ring_id) &&
+ i915_seqno_passed(seqno, flip_data->seqno)) {
+ /* FIXME: intel_do_mmio_flip() could be moved out of the spinlock */
+ intel_do_mmio_flip(flip_data->crtc);
+ flip_data->seqno = 0;
+ ring->irq_put(ring);
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->flip_lock, irq_flags);
+}
+
+/* Use MMIO-based flips starting from VLV, for Media power well
+ * residency optimization. The alternative of render-ring-based flip
+ * calls is not used, as it severely affected the performance (FPS)
+ * of certain 3D apps.
+ */
+static int intel_gen7_queue_mmio_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj,
+ uint32_t flags)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct i915_flip_data *flip_data = &dev_priv->flip_data[intel_crtc->pipe];
+ unsigned long irq_flags;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, obj->ring);
+ if (ret)
+ goto err;
+
+ switch (intel_crtc->plane) {
+ case PLANE_A:
+ case PLANE_B:
+ case PLANE_C:
+ break;
+ default:
+ WARN_ONCE(1, "unknown plane in flip command\n");
+ ret = -ENODEV;
+ goto err_unpin;
+ }
+
+ if (!intel_postpone_flip(obj)) {
+ intel_do_mmio_flip(crtc);
+ return 0;
+ }
+
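+ /* Record the flip so the ring interrupt handler can complete it once
+ * rendering to the object has completed.
+ */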
+ spin_lock_irqsave(&dev_priv->flip_lock, irq_flags);
+ flip_data->seqno = obj->last_write_seqno;
+ flip_data->ring_id = obj->ring->id;
+ spin_unlock_irqrestore(&dev_priv->flip_lock, irq_flags);
+
+ /* Double-check to catch the case where the ring interrupt fired
+ * before the flip data was ready.
+ */
+ intel_notify_mmio_flip(dev, obj->ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
+ return ret;
+}
+
static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -10581,6 +10697,12 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);