From patchwork Fri Nov 16 14:22:15 2012 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?b?VmlsbGUgU3lyasOkbMOk?= X-Patchwork-Id: 1755251 Return-Path: X-Original-To: patchwork-dri-devel@patchwork.kernel.org Delivered-To: patchwork-process-083081@patchwork2.kernel.org Received: from gabe.freedesktop.org (gabe.freedesktop.org [131.252.210.177]) by patchwork2.kernel.org (Postfix) with ESMTP id B8415DF230 for ; Fri, 16 Nov 2012 14:26:31 +0000 (UTC) Received: from gabe.freedesktop.org (localhost [127.0.0.1]) by gabe.freedesktop.org (Postfix) with ESMTP id A60B643716 for ; Fri, 16 Nov 2012 06:26:31 -0800 (PST) X-Original-To: dri-devel@lists.freedesktop.org Delivered-To: dri-devel@lists.freedesktop.org Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by gabe.freedesktop.org (Postfix) with ESMTP id AD18143709; Fri, 16 Nov 2012 06:22:48 -0800 (PST) Received: from fmsmga001.fm.intel.com ([10.253.24.23]) by fmsmga102.fm.intel.com with ESMTP; 16 Nov 2012 06:22:48 -0800 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="4.83,265,1352102400"; d="scan'208";a="248252914" Received: from stinkbox.fi.intel.com (HELO stinkbox) ([10.237.72.168]) by fmsmga001.fm.intel.com with SMTP; 16 Nov 2012 06:22:41 -0800 Received: by stinkbox (sSMTP sendmail emulation); Fri, 16 Nov 2012 16:22:40 +0200 From: ville.syrjala@linux.intel.com To: dri-devel@lists.freedesktop.org Subject: [PATCH 03/13] drm/i915: Implement a non-blocking GPU synchronization mechanism for atomic page flips Date: Fri, 16 Nov 2012 16:22:15 +0200 Message-Id: <1353075745-30115-4-git-send-email-ville.syrjala@linux.intel.com> X-Mailer: git-send-email 1.7.8.6 In-Reply-To: <1353075745-30115-1-git-send-email-ville.syrjala@linux.intel.com> References: <1353075745-30115-1-git-send-email-ville.syrjala@linux.intel.com> MIME-Version: 1.0 Cc: intel-gfx@lists.freedesktop.org X-BeenThere: dri-devel@lists.freedesktop.org X-Mailman-Version: 2.1.13 Precedence: list 
List-Id: Direct Rendering Infrastructure - Development List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Sender: dri-devel-bounces+patchwork-dri-devel=patchwork.kernel.org@lists.freedesktop.org Errors-To: dri-devel-bounces+patchwork-dri-devel=patchwork.kernel.org@lists.freedesktop.org From: Ville Syrjälä After the atomic flip has been split up into individual flip requests for each scanout engine, put each such request into a FIFO. Then for each flip request add a new request to the ring(s) in order to get an interrupt once the GPU has finished whatever it was doing with the new front buffer. Once the flip requests associated with the same atomic flip and the same pipe at the head of the queue are ready, pull them from the queue and issue the flips to the hardware. Signed-off-by: Ville Syrjälä --- drivers/gpu/drm/i915/i915_drv.h | 8 ++ drivers/gpu/drm/i915/i915_irq.c | 2 + drivers/gpu/drm/i915/intel_atomic.c | 233 ++++++++++++++++++++++++++++++++++- drivers/gpu/drm/i915/intel_drv.h | 1 + 4 files changed, 240 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8805a07..8aaf0ce 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -920,6 +920,14 @@ typedef struct drm_i915_private { struct i915_dri1_state dri1; struct drm_flip_driver flip_driver; + + struct { + struct list_head list; + spinlock_t lock; + struct work_struct work; + struct workqueue_struct *wq; + unsigned int next_flip_seq; + } flip; } drm_i915_private_t; /* Iterate over initialised rings */ diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 74b6f5f..441306d 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -348,6 +348,8 @@ static void notify_ring(struct drm_device *dev, { struct drm_i915_private *dev_priv = dev->dev_private; + intel_atomic_notify_ring(dev, ring); + if (ring->obj == NULL) return; diff --git 
a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index 33357ec..04a20c5 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c @@ -30,6 +30,10 @@ #include "intel_drv.h" +#define USE_WRITE_SEQNO +#define USE_OLR +//#define FORCE_CACHE_FLUSH + struct intel_flip { struct drm_flip base; u32 vbl_count; @@ -42,6 +46,9 @@ struct intel_flip { struct drm_pending_atomic_event *event; uint32_t old_fb_id; struct list_head pending_head; + struct intel_ring_buffer *ring; + u32 seqno; + unsigned int flip_seq; }; struct intel_plane_state { @@ -801,6 +808,7 @@ static int pin_fbs(struct drm_device *dev, struct intel_atomic_state *s) { int i, ret; + bool nonblock = s->flags & DRM_MODE_ATOMIC_NONBLOCK; for (i = 0; i < dev->mode_config.num_crtc; i++) { struct intel_crtc_state *st = &s->crtc[i]; @@ -816,7 +824,7 @@ static int pin_fbs(struct drm_device *dev, obj = to_intel_framebuffer(crtc->fb)->obj; mutex_lock(&dev->struct_mutex); - ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); + ret = intel_pin_and_fence_fb_obj(dev, obj, nonblock ? obj->ring : NULL); mutex_unlock(&dev->struct_mutex); if (ret) @@ -839,7 +847,7 @@ static int pin_fbs(struct drm_device *dev, obj = to_intel_framebuffer(plane->fb)->obj; mutex_lock(&dev->struct_mutex); - ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); + ret = intel_pin_and_fence_fb_obj(dev, obj, nonblock ? 
obj->ring : NULL); mutex_unlock(&dev->struct_mutex); if (ret) @@ -2040,6 +2048,8 @@ static const struct drm_flip_driver_funcs intel_flip_driver_funcs = { .flush = intel_flip_driver_flush, }; +static void intel_atomic_process_flips_work(struct work_struct *work); + static void intel_flip_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -2058,6 +2068,11 @@ static void intel_flip_init(struct drm_device *dev) list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head) drm_flip_helper_init(&intel_plane->flip_helper, &dev_priv->flip_driver, &intel_flip_funcs); + + INIT_LIST_HEAD(&dev_priv->flip.list); + spin_lock_init(&dev_priv->flip.lock); + INIT_WORK(&dev_priv->flip.work, intel_atomic_process_flips_work); + dev_priv->flip.wq = create_singlethread_workqueue("intel_flip"); } static void intel_flip_fini(struct drm_device *dev) @@ -2075,6 +2090,46 @@ static void intel_flip_fini(struct drm_device *dev) drm_flip_driver_fini(&dev_priv->flip_driver); } +static bool intel_atomic_postpone_flip(struct intel_flip *intel_flip) +{ + struct intel_ring_buffer *ring = intel_flip->ring; + int ret; + +#ifdef FORCE_CACHE_FLUSH + /* Unconditionally force add_request to emit a full flush. 
*/ + ring->gpu_caches_dirty = true; +#endif + +#ifdef USE_OLR + ret = i915_gem_check_olr(ring, intel_flip->seqno); + if (WARN_ON(ret)) { + intel_flip->ring = NULL; + return false; + } + + if (i915_seqno_passed(ring->get_seqno(ring, true), intel_flip->seqno)) { + intel_flip->ring = NULL; + return false; + } +#endif + + if (WARN_ON(!ring->irq_get(ring))) { + intel_flip->ring = NULL; + return false; + } + +#ifndef USE_OLR + ret = i915_add_request(ring, NULL, &intel_flip->seqno); + if (WARN_ON(ret)) { + ring->irq_put(ring); + intel_flip->ring = NULL; + return false; + } +#endif + + return true; +} + static void intel_atomic_schedule_flips(struct drm_i915_private *dev_priv, struct intel_crtc *intel_crtc, struct list_head *flips) @@ -2095,15 +2150,120 @@ static void intel_atomic_schedule_flips(struct drm_i915_private *dev_priv, local_irq_enable(); } +static bool intel_atomic_flips_ready(struct drm_device *dev, unsigned int flip_seq) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_flip *intel_flip; + + /* check if all flips w/ same flip_seq are ready */ + list_for_each_entry(intel_flip, &dev_priv->flip.list, base.list) { + if (intel_flip->flip_seq != flip_seq) + break; + + if (intel_flip->ring) + return false; + } + + return true; +} + +static void intel_atomic_process_flips_work(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, flip.work); + struct drm_device *dev = dev_priv->dev; + + for (;;) { + struct intel_flip *intel_flip, *next; + unsigned int flip_seq; + struct intel_crtc *intel_crtc; + LIST_HEAD(flips); + unsigned long flags; + + if (list_empty(&dev_priv->flip.list)) + return; + + spin_lock_irqsave(&dev_priv->flip.lock, flags); + + intel_flip = list_first_entry(&dev_priv->flip.list, struct intel_flip, base.list); + flip_seq = intel_flip->flip_seq; + intel_crtc = to_intel_crtc(intel_flip->crtc); + + if (intel_atomic_flips_ready(dev, flip_seq)) { + 
list_for_each_entry_safe(intel_flip, next, &dev_priv->flip.list, base.list) { + if (intel_flip->flip_seq != flip_seq) + break; + list_move_tail(&intel_flip->base.list, &flips); + } + } + + spin_unlock_irqrestore(&dev_priv->flip.lock, flags); + + if (list_empty(&flips)) + return; + + mutex_lock(&dev->mode_config.mutex); + intel_atomic_schedule_flips(dev_priv, intel_crtc, &flips); + mutex_unlock(&dev->mode_config.mutex); + } +} + +static void intel_atomic_check_flips_ready(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_flip *intel_flip; + + if (list_empty(&dev_priv->flip.list)) + return; + + intel_flip = list_first_entry(&dev_priv->flip.list, struct intel_flip, base.list); + if (intel_atomic_flips_ready(dev, intel_flip->flip_seq)) + queue_work(dev_priv->flip.wq, &dev_priv->flip.work); +} + +void intel_atomic_notify_ring(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_flip *intel_flip; + unsigned long flags; + u32 seqno; + + if (list_empty(&dev_priv->flip.list)) + return; + + seqno = ring->get_seqno(ring, false); + + spin_lock_irqsave(&dev_priv->flip.lock, flags); + + list_for_each_entry(intel_flip, &dev_priv->flip.list, base.list) { + if (ring != intel_flip->ring) + continue; + + if (i915_seqno_passed(seqno, intel_flip->seqno)) { + intel_flip->ring = NULL; + ring->irq_put(ring); + } + } + + intel_atomic_check_flips_ready(dev); + + spin_unlock_irqrestore(&dev_priv->flip.lock, flags); +} + static void atomic_pipe_commit(struct drm_device *dev, struct intel_atomic_state *state, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_file_private *file_priv = state->file->driver_priv; - struct intel_crtc *intel_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe)); LIST_HEAD(flips); int i; + /* FIXME treat flips for all pipes as one set for GPU sync */ + unsigned int flip_seq = dev_priv->flip.next_flip_seq++; 
+ struct intel_flip *intel_flip, *next; + unsigned long flags; + struct intel_ring_buffer *ring; + unsigned int rings_mask = 0; for (i = 0; i < dev->mode_config.num_crtc; i++) { struct intel_crtc_state *st = &state->crtc[i]; @@ -2122,6 +2282,8 @@ static void atomic_pipe_commit(struct drm_device *dev, drm_flip_init(&intel_flip->base, &intel_crtc->flip_helper); + intel_flip->flip_seq = flip_seq; + if (st->event) { intel_flip->event = st->event; st->event = NULL; @@ -2134,6 +2296,19 @@ static void atomic_pipe_commit(struct drm_device *dev, intel_flip->crtc = crtc; + if (crtc->fb) { + struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj; + + mutex_lock(&dev->struct_mutex); +#ifdef USE_WRITE_SEQNO + intel_flip->seqno = obj->last_write_seqno; +#else + intel_flip->seqno = obj->last_read_seqno; +#endif + intel_flip->ring = obj->ring; + mutex_unlock(&dev->struct_mutex); + } + /* update primary_disabled before calc_plane() */ intel_crtc->primary_disabled = st->primary_disabled; @@ -2177,6 +2352,8 @@ static void atomic_pipe_commit(struct drm_device *dev, drm_flip_init(&intel_flip->base, &intel_plane->flip_helper); + intel_flip->flip_seq = flip_seq; + if (st->event) { intel_flip->event = st->event; st->event = NULL; @@ -2190,6 +2367,19 @@ static void atomic_pipe_commit(struct drm_device *dev, intel_flip->crtc = intel_get_crtc_for_pipe(dev, pipe); intel_flip->plane = plane; + if (plane->fb) { + struct drm_i915_gem_object *obj = to_intel_framebuffer(plane->fb)->obj; + + mutex_lock(&dev->struct_mutex); +#ifdef USE_WRITE_SEQNO + intel_flip->seqno = obj->last_write_seqno; +#else + intel_flip->seqno = obj->last_read_seqno; +#endif + intel_flip->ring = obj->ring; + mutex_unlock(&dev->struct_mutex); + } + intel_plane->calc(plane, plane->fb, &st->coords); if (st->old.fb) @@ -2209,7 +2399,42 @@ static void atomic_pipe_commit(struct drm_device *dev, if (list_empty(&flips)) return; - intel_atomic_schedule_flips(dev_priv, intel_crtc, &flips); + 
mutex_lock(&dev->struct_mutex); + + list_for_each_entry(intel_flip, &flips, base.list) { + struct intel_ring_buffer *ring = intel_flip->ring; + + if (!ring) + continue; + + if (intel_atomic_postpone_flip(intel_flip)) + rings_mask |= intel_ring_flag(ring); + } + + spin_lock_irqsave(&dev_priv->flip.lock, flags); + + list_for_each_entry_safe(intel_flip, next, &flips, base.list) + list_move_tail(&intel_flip->base.list, &dev_priv->flip.list); + + /* if no rings are involved, we can avoid checking seqnos */ + if (rings_mask == 0) + intel_atomic_check_flips_ready(dev); + + spin_unlock_irqrestore(&dev_priv->flip.lock, flags); + + mutex_unlock(&dev->struct_mutex); + + if (rings_mask == 0) + return; + + /* + * Double check to catch cases where the irq + * fired before the flip was placed onto flip.list. + */ + for_each_ring(ring, dev_priv, i) { + if (rings_mask & intel_ring_flag(ring)) + intel_atomic_notify_ring(dev, ring); + } } void intel_atomic_handle_vblank(struct drm_device *dev, int pipe) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 8eabf40..319aac8 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -722,6 +722,7 @@ extern void intel_atomic_fini(struct drm_device *dev); extern void intel_atomic_free_events(struct drm_device *dev, struct drm_file *file); extern void intel_atomic_handle_vblank(struct drm_device *dev, int pipe); extern void intel_atomic_clear_flips(struct drm_crtc *crtc); +extern void intel_atomic_notify_ring(struct drm_device *dev, struct intel_ring_buffer *ring); extern void intel_enable_primary(struct drm_crtc *crtc); extern void intel_disable_primary(struct drm_crtc *crtc);