From patchwork Thu Oct 25 18:05:49 2012 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: Ville Syrjala X-Patchwork-Id: 1647101 Return-Path: X-Original-To: patchwork-dri-devel@patchwork.kernel.org Delivered-To: patchwork-process-083081@patchwork1.kernel.org Received: from gabe.freedesktop.org (gabe.freedesktop.org [131.252.210.177]) by patchwork1.kernel.org (Postfix) with ESMTP id AC8CD3FE1C for ; Thu, 25 Oct 2012 18:55:39 +0000 (UTC) Received: from gabe.freedesktop.org (localhost [127.0.0.1]) by gabe.freedesktop.org (Postfix) with ESMTP id 8B2EFA0A7E for ; Thu, 25 Oct 2012 11:55:39 -0700 (PDT) X-Original-To: dri-devel@lists.freedesktop.org Delivered-To: dri-devel@lists.freedesktop.org Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by gabe.freedesktop.org (Postfix) with ESMTP id BD928A0A3D for ; Thu, 25 Oct 2012 11:18:28 -0700 (PDT) Received: from fmsmga002.fm.intel.com ([10.253.24.26]) by fmsmga101.fm.intel.com with ESMTP; 25 Oct 2012 11:18:28 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="4.80,648,1344236400"; d="scan'208";a="240235329" Received: from stinkbox.fi.intel.com (HELO stinkbox) ([10.237.72.168]) by fmsmga002.fm.intel.com with SMTP; 25 Oct 2012 11:08:24 -0700 Received: by stinkbox (sSMTP sendmail emulation); Thu, 25 Oct 2012 21:08:23 +0300 From: ville.syrjala@linux.intel.com To: dri-devel@lists.freedesktop.org Subject: [PATCH 46/51] drm/i915: Add support for atomic modesetting completion events Date: Thu, 25 Oct 2012 21:05:49 +0300 Message-Id: <1351188354-24233-47-git-send-email-ville.syrjala@linux.intel.com> X-Mailer: git-send-email 1.7.8.6 In-Reply-To: <1351188354-24233-1-git-send-email-ville.syrjala@linux.intel.com> References: <1351188354-24233-1-git-send-email-ville.syrjala@linux.intel.com> MIME-Version: 1.0 X-BeenThere: dri-devel@lists.freedesktop.org X-Mailman-Version: 2.1.13 Precedence: list List-Id: Direct Rendering Infrastructure - Development List-Unsubscribe: , 
List-Archive: List-Post: List-Help: List-Subscribe: , Sender: dri-devel-bounces+patchwork-dri-devel=patchwork.kernel.org@lists.freedesktop.org Errors-To: dri-devel-bounces+patchwork-dri-devel=patchwork.kernel.org@lists.freedesktop.org From: Ville Syrjälä Send completion events when the atomic modesetting operations have finished successfully. Signed-off-by: Ville Syrjälä --- drivers/gpu/drm/i915/intel_atomic.c | 195 ++++++++++++++++++++++++++++++++++- 1 files changed, 192 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index 4899f8c..3adb140 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c @@ -53,6 +53,7 @@ struct intel_plane_state { bool dirty; bool pinned; bool changed; + struct drm_pending_atomic_event *event; struct { struct drm_crtc *crtc; @@ -74,6 +75,7 @@ struct intel_crtc_state { unsigned long connectors_bitmask; unsigned long encoders_bitmask; bool changed; + struct drm_pending_atomic_event *event; struct { bool enabled; @@ -922,6 +924,111 @@ int intel_commit_plane_nopin(struct drm_plane *plane, struct drm_framebuffer *fb, const struct intel_plane_coords *coords); +static struct drm_pending_atomic_event *alloc_event(struct drm_device *dev, + struct drm_file *file_priv, + uint64_t user_data) +{ + struct drm_pending_atomic_event *e; + unsigned long flags; + + spin_lock_irqsave(&dev->event_lock, flags); + + if (file_priv->event_space < sizeof e->event) { + spin_unlock_irqrestore(&dev->event_lock, flags); + return ERR_PTR(-ENOSPC); + } + + file_priv->event_space -= sizeof e->event; + spin_unlock_irqrestore(&dev->event_lock, flags); + + e = kzalloc(sizeof *e, GFP_KERNEL); + if (!e) { + spin_lock_irqsave(&dev->event_lock, flags); + file_priv->event_space += sizeof e->event; + spin_unlock_irqrestore(&dev->event_lock, flags); + + return ERR_PTR(-ENOMEM); + } + + e->event.base.type = DRM_EVENT_ATOMIC_COMPLETE; + e->event.base.length = sizeof e->event; + 
e->event.user_data = user_data; + e->base.event = &e->event.base; + e->base.file_priv = file_priv; + e->base.destroy = (void (*) (struct drm_pending_event *)) kfree; + + return e; +} + +static void free_event(struct drm_pending_atomic_event *e) +{ + e->base.file_priv->event_space += sizeof e->event; + kfree(e); +} + +static void queue_event(struct drm_device *dev, struct drm_crtc *crtc, + struct drm_pending_atomic_event *e) +{ + struct timeval tvbl; + + if (crtc) { + int pipe = to_intel_crtc(crtc)->pipe; + + /* FIXME this is wrong for flips that are completed not at vblank */ + e->event.sequence = drm_vblank_count_and_time(dev, pipe, &tvbl); + e->event.tv_sec = tvbl.tv_sec; + e->event.tv_usec = tvbl.tv_usec; + } else { + e->event.sequence = 0; + e->event.tv_sec = 0; + e->event.tv_usec = 0; + } + + list_add_tail(&e->base.link, &e->base.file_priv->event_list); + wake_up_interruptible(&e->base.file_priv->event_wait); +} + +static void queue_remaining_events(struct drm_device *dev, struct intel_atomic_state *s) +{ + int i; + + for (i = 0; i < dev->mode_config.num_crtc; i++) { + struct intel_crtc_state *st = &s->crtc[i]; + + if (st->event) { + if (st->old.fb) + st->event->event.old_fb_id = st->old.fb->base.id; + + spin_lock_irq(&dev->event_lock); + queue_event(dev, st->crtc, st->event); + spin_unlock_irq(&dev->event_lock); + + st->event = NULL; + } + } + + for (i = 0; i < dev->mode_config.num_plane; i++) { + struct intel_plane_state *st = &s->plane[i]; + struct drm_crtc *crtc; + + if (!st->event) + continue; + + crtc = st->plane->crtc; + if (!crtc) + crtc = st->old.crtc; + + if (st->old.fb) + st->event->event.old_fb_id = st->old.fb->base.id; + + spin_lock_irq(&dev->event_lock); + queue_event(dev, crtc, st->event); + spin_unlock_irq(&dev->event_lock); + + st->event = NULL; + } +} + static void swap_old_new(struct drm_device *dev, struct intel_atomic_state *s) { @@ -1426,6 +1533,73 @@ static void update_crtc(struct drm_device *dev, } } +static int alloc_flip_data(struct 
drm_device *dev, struct intel_atomic_state *s) +{ + int i; + + for (i = 0; i < dev->mode_config.num_crtc; i++) { + struct intel_crtc_state *st = &s->crtc[i]; + + if (st->changed && s->flags & DRM_MODE_ATOMIC_EVENT) { + struct drm_pending_atomic_event *e; + + e = alloc_event(dev, s->file, s->user_data); + if (IS_ERR(e)) + return PTR_ERR(e); + + e->event.obj_id = st->crtc->base.id; + + st->event = e; + } + } + + + for (i = 0; i < dev->mode_config.num_plane; i++) { + struct intel_plane_state *st = &s->plane[i]; + + if (st->changed && s->flags & DRM_MODE_ATOMIC_EVENT) { + struct drm_pending_atomic_event *e; + + e = alloc_event(dev, s->file, s->user_data); + if (IS_ERR(e)) + return PTR_ERR(e); + + e->event.obj_id = st->plane->base.id; + + st->event = e; + } + } + + return 0; +} + +static void free_flip_data(struct drm_device *dev, struct intel_atomic_state *s) +{ + int i; + + for (i = 0; i < dev->mode_config.num_crtc; i++) { + struct intel_crtc_state *st = &s->crtc[i]; + + if (st->event) { + spin_lock_irq(&dev->event_lock); + free_event(st->event); + spin_unlock_irq(&dev->event_lock); + st->event = NULL; + } + } + + for (i = 0; i < dev->mode_config.num_plane; i++) { + struct intel_plane_state *st = &s->plane[i]; + + if (st->event) { + spin_lock_irq(&dev->event_lock); + free_event(st->event); + spin_unlock_irq(&dev->event_lock); + st->event = NULL; + } + } +} + static int intel_atomic_commit(struct drm_device *dev, void *state) { struct intel_atomic_state *s = state; @@ -1434,12 +1608,13 @@ static int intel_atomic_commit(struct drm_device *dev, void *state) if (s->flags & DRM_MODE_ATOMIC_NONBLOCK) return -ENOSYS; - if (s->flags & DRM_MODE_ATOMIC_EVENT) - return -ENOSYS; - if (!s->dirty) return 0; + ret = alloc_flip_data(dev, s); + if (ret) + return ret; + ret = pin_fbs(dev, s); if (ret) return ret; @@ -1460,6 +1635,17 @@ static int intel_atomic_commit(struct drm_device *dev, void *state) unpin_old_cursors(dev, s); unpin_old_fbs(dev, s); + /* + * Either we took the 
blocking code path, or perhaps the state of + * some objects didn't actually change? Nonetheless the user wanted + * events for all objects he touched, so queue up any events that + * are still pending. + * + * FIXME this needs more work. If the previous flip is still pending + * we shouldn't send this event until that flip completes. + */ + queue_remaining_events(dev, s); + update_plane_obj(dev, s); update_crtc(dev, s); @@ -1473,6 +1659,9 @@ static void intel_atomic_end(struct drm_device *dev, void *state) { struct intel_atomic_state *s = state; + /* don't send events when restoring old state */ + free_flip_data(dev, state); + /* restore the state of all objects */ if (s->restore_state) restore_state(dev, state);