From patchwork Tue Oct 5 10:43:03 2010
X-Patchwork-Submitter: Thomas Hellstrom
X-Patchwork-Id: 232051
From: Thomas Hellstrom
To: airlied@redhat.com
Subject: [PATCH 05/11] vmwgfx: Take the ttm lock around the dirty ioctl
Date: Tue, 5 Oct 2010 12:43:03 +0200
Message-Id: <1286275389-26641-6-git-send-email-thellstrom@vmware.com>
X-Mailer: git-send-email 1.6.2.5
In-Reply-To: <1286275389-26641-5-git-send-email-thellstrom@vmware.com>
References: <1286275389-26641-1-git-send-email-thellstrom@vmware.com>
 <1286275389-26641-2-git-send-email-thellstrom@vmware.com>
 <1286275389-26641-3-git-send-email-thellstrom@vmware.com>
 <1286275389-26641-4-git-send-email-thellstrom@vmware.com>
 <1286275389-26641-5-git-send-email-thellstrom@vmware.com>
Cc: Thomas Hellstrom, dri-devel@lists.freedesktop.org
List-Id: Direct Rendering Infrastructure - Development

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 45e57d3..9350d48 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -597,6 +597,8 @@ static void vmw_lastclose(struct drm_device *dev)
 static void vmw_master_init(struct vmw_master *vmaster)
 {
 	ttm_lock_init(&vmaster->lock);
+	INIT_LIST_HEAD(&vmaster->fb_surf);
+	mutex_init(&vmaster->fb_surf_mutex);
 }
 
 static int vmw_master_create(struct drm_device *dev,
@@ -608,7 +610,7 @@ static int vmw_master_create(struct drm_device *dev,
 	if (unlikely(vmaster == NULL))
 		return -ENOMEM;
 
-	ttm_lock_init(&vmaster->lock);
+	vmw_master_init(vmaster);
 	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
 	master->driver_priv = vmaster;
 
@@ -699,6 +701,7 @@ static void vmw_master_drop(struct drm_device *dev,
 
 	vmw_fp->locked_master = drm_master_get(file_priv->master);
 	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
+	vmw_kms_idle_workqueues(vmaster);
 
 	if (unlikely((ret != 0))) {
 		DRM_ERROR("Unable to lock TTM at VT switch.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 132cc24..0ab53d9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -151,6 +151,8 @@ struct vmw_overlay;
 
 struct vmw_master {
 	struct ttm_lock lock;
+	struct mutex fb_surf_mutex;
+	struct list_head fb_surf;
 };
 
 struct vmw_vga_topology_state {
@@ -519,6 +521,7 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv,
 			unsigned bbp, unsigned depth);
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
+void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
 u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
 
 /**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 073b3e1..82bd3d8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -332,18 +332,55 @@ struct vmw_framebuffer_surface {
 	struct delayed_work d_work;
 	struct mutex work_lock;
 	bool present_fs;
+	struct list_head head;
+	struct drm_master *master;
 };
 
+/**
+ * vmw_kms_idle_workqueues - Flush workqueues on this master
+ *
+ * @vmaster - Pointer identifying the master, for the surfaces of which
+ * we idle the dirty work queues.
+ *
+ * This function should be called with the ttm lock held in exclusive mode
+ * to idle all dirty work queues before the fifo is taken down.
+ *
+ * The work task may actually requeue itself, but after the flush returns we're
+ * sure that there's nothing to present, since the ttm lock is held in
+ * exclusive mode, so the fifo will never get used.
+ */
+
+void vmw_kms_idle_workqueues(struct vmw_master *vmaster)
+{
+	struct vmw_framebuffer_surface *entry;
+
+	mutex_lock(&vmaster->fb_surf_mutex);
+	list_for_each_entry(entry, &vmaster->fb_surf, head) {
+		if (cancel_delayed_work_sync(&entry->d_work))
+			(void) entry->d_work.work.func(&entry->d_work.work);
+
+		(void) cancel_delayed_work_sync(&entry->d_work);
+	}
+	mutex_unlock(&vmaster->fb_surf_mutex);
+}
+
 void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
 {
-	struct vmw_framebuffer_surface *vfb =
+	struct vmw_framebuffer_surface *vfbs =
 		vmw_framebuffer_to_vfbs(framebuffer);
+	struct vmw_master *vmaster = vmw_master(vfbs->master);
+
 
-	cancel_delayed_work_sync(&vfb->d_work);
+	mutex_lock(&vmaster->fb_surf_mutex);
+	list_del(&vfbs->head);
+	mutex_unlock(&vmaster->fb_surf_mutex);
+
+	cancel_delayed_work_sync(&vfbs->d_work);
+	drm_master_put(&vfbs->master);
 	drm_framebuffer_cleanup(framebuffer);
-	vmw_surface_unreference(&vfb->surface);
+	vmw_surface_unreference(&vfbs->surface);
 
-	kfree(framebuffer);
+	kfree(vfbs);
 }
 
 static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
@@ -362,6 +399,12 @@ static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
 		SVGA3dCopyRect cr;
 	} *cmd;
 
+	/**
+	 * Strictly we should take the ttm_lock in read mode before accessing
+	 * the fifo, to make sure the fifo is present and up. However,
+	 * instead we flush all workqueues under the ttm lock in exclusive mode
+	 * before taking down the fifo.
+	 */
 	mutex_lock(&vfbs->work_lock);
 	if (!vfbs->present_fs)
 		goto out_unlock;
@@ -398,12 +441,14 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 				  unsigned num_clips)
 {
 	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	struct vmw_framebuffer_surface *vfbs =
 		vmw_framebuffer_to_vfbs(framebuffer);
 	struct vmw_surface *surf = vfbs->surface;
 	struct drm_clip_rect norect;
 	SVGA3dCopyRect *cr;
 	int i, inc = 1;
+	int ret;
 
 	struct {
 		SVGA3dCmdHeader header;
@@ -411,6 +456,13 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 		SVGA3dCopyRect cr;
 	} *cmd;
 
+	if (unlikely(vfbs->master != file_priv->master))
+		return -EINVAL;
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
 	if (!num_clips ||
 	    !(dev_priv->fifo.capabilities &
 	      SVGA_FIFO_CAP_SCREEN_OBJECT)) {
@@ -426,6 +478,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 			 */
 			vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
 		}
+		ttm_read_unlock(&vmaster->lock);
 		return 0;
 	}
 
@@ -443,6 +496,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Fifo reserve failed.\n");
+		ttm_read_unlock(&vmaster->lock);
 		return -ENOMEM;
 	}
 
@@ -462,7 +516,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 	}
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
-
+	ttm_read_unlock(&vmaster->lock);
 	return 0;
 }
 
@@ -473,6 +527,7 @@ static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
 };
 
 static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
+					   struct drm_file *file_priv,
 					   struct vmw_surface *surface,
 					   struct vmw_framebuffer **out,
 					   const struct drm_mode_fb_cmd
@@ -482,6 +537,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 	struct drm_device *dev = dev_priv->dev;
 	struct vmw_framebuffer_surface *vfbs;
 	enum SVGA3dSurfaceFormat format;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret;
 
 	/*
@@ -546,8 +602,14 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 	vfbs->base.pin = &vmw_surface_dmabuf_pin;
 	vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
 	vfbs->surface = surface;
+	vfbs->master = drm_master_get(file_priv->master);
 	mutex_init(&vfbs->work_lock);
+
+	mutex_lock(&vmaster->fb_surf_mutex);
 	INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
+	list_add_tail(&vfbs->head, &vmaster->fb_surf);
+	mutex_unlock(&vmaster->fb_surf_mutex);
+
 	*out = &vfbs->base;
 
 	return 0;
@@ -590,13 +652,19 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 				 unsigned num_clips)
 {
 	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	struct drm_clip_rect norect;
+	int ret;
 	struct {
 		uint32_t header;
 		SVGAFifoCmdUpdate body;
 	} *cmd;
 	int i, increment = 1;
 
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
 	if (!num_clips) {
 		num_clips = 1;
 		clips = &norect;
@@ -611,6 +679,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Fifo reserve failed.\n");
+		ttm_read_unlock(&vmaster->lock);
 		return -ENOMEM;
 	}
 
@@ -623,6 +692,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 	}
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
+	ttm_read_unlock(&vmaster->lock);
 
 	return 0;
 }
@@ -795,8 +865,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 	if (!surface->scanout)
 		goto err_not_scanout;
 
-	ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
-					      mode_cmd);
+	ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
+					      &vfb, mode_cmd);
 
 	/* vmw_user_surface_lookup takes one ref so does new_fb */
 	vmw_surface_unreference(&surface);
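
The change common to both dirty ioctls above is that every path through the function now holds the ttm lock in read mode while the fifo is reserved and committed, and drops it again on every exit, including the error paths. For reference, a minimal sketch of that pattern follows; it is not part of the patch, and the function name, the fifo_size parameter and the omitted command setup are placeholders, assuming only the ttm_lock and vmw_fifo calls that appear in the diff:

static int vmw_dirty_fifo_sketch(struct vmw_private *dev_priv,
				 struct vmw_master *vmaster,
				 uint32_t fifo_size)
{
	void *cmd;
	int ret;

	/* Interruptible read lock, matching ttm_read_lock(..., true) above. */
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
	if (unlikely(cmd == NULL)) {
		/* Every early return must drop the read lock again. */
		ttm_read_unlock(&vmaster->lock);
		return -ENOMEM;
	}

	/* ... build the SVGA command in cmd here ... */

	vmw_fifo_commit(dev_priv, fifo_size);
	ttm_read_unlock(&vmaster->lock);
	return 0;
}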
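
vmw_kms_idle_workqueues() relies on cancel_delayed_work_sync() reporting whether a pending work item was cancelled: if so, the handler is run once synchronously so a queued-but-not-yet-presented update is not lost, and a second cancel catches the case where the handler requeued itself. A condensed sketch of that idiom with generic names, an illustration only, assuming nothing beyond the standard workqueue API:

#include <linux/workqueue.h>

static void idle_delayed_work(struct delayed_work *dwork)
{
	/*
	 * If the work was still pending, cancel it and run the handler once
	 * ourselves so its final update is not silently dropped.
	 */
	if (cancel_delayed_work_sync(dwork))
		dwork->work.func(&dwork->work);

	/* The handler may have requeued itself; cancel that instance too. */
	cancel_delayed_work_sync(dwork);
}

As the kernel-doc comment in the patch states, this is only safe with the ttm lock held in exclusive mode, which is why vmw_master_drop() calls it right after ttm_vt_lock().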