From patchwork Tue Nov 12 12:34:10 2013
From: Maarten Lankhorst <maarten.lankhorst@canonical.com>
To: bskeggs@redhat.com
Cc: nouveau@lists.freedesktop.org, dri-devel@lists.freedesktop.org
Subject: [PATCH 3/7] drm/nouveau: fixup locking inversion between mmap_sem and reservations
Date: Tue, 12 Nov 2013 13:34:10 +0100
Message-Id: <1384259656-29483-3-git-send-email-maarten.lankhorst@canonical.com>
In-Reply-To: <1384259656-29483-1-git-send-email-maarten.lankhorst@canonical.com>
References: <1384259656-29483-1-git-send-email-maarten.lankhorst@canonical.com>
X-Patchwork-Id: 3172021

Allocate and copy all kernel memory before taking any reservations. This
prevents a locking inversion between mmap_sem and the reservation class,
and allows us to drop the trylocking in ttm_bo_vm_fault without upsetting
lockdep.

Relocations are handled by first attempting the copy with
__copy_from_user_inatomic, which cannot fault. If that fails, all
validations are undone, the relocations are copied from userspace with
the sleeping copy_from_user, and all validations are retried.
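As an aside for reviewers, here is a minimal userspace sketch of the retry
pattern described above. All names (take_reservations, copy_nonfaulting,
copy_sleeping, submit) are stand-ins, not the kernel/TTM API, and the
"resident" flag fakes whether the user page would fault. The point is the
ordering: only the copy that cannot fault is attempted while reservations
are held, and on -EFAULT everything is unwound before the sleeping copy,
so mmap_sem is never acquired under a reservation.

/*
 * Sketch only: stand-in names, compiles and runs as plain userspace C.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static void take_reservations(void) { /* validate_init() in the patch */ }
static void drop_reservations(void) { /* validate_fini() in the patch */ }

/* Stand-in for __copy_from_user_inatomic: fails instead of faulting. */
static int copy_nonfaulting(void *dst, const void *src, size_t n, int resident)
{
	if (!resident)
		return -EFAULT;
	memcpy(dst, src, n);
	return 0;
}

/* Stand-in for copy_from_user: may sleep, so only legal unlocked. */
static int copy_sleeping(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
	return 0;
}

static int submit(const char *user_relocs, size_t n, int resident)
{
	char relocs[64];
	int copied = 0;	/* mirrors "if (!reloc)" in the patch */

	if (n > sizeof(relocs))
		return -EINVAL;
restart:
	take_reservations();

	if (!copied && copy_nonfaulting(relocs, user_relocs, n, resident)) {
		/* Undo everything, do the faulting copy unlocked, retry. */
		drop_reservations();
		if (copy_sleeping(relocs, user_relocs, n))
			return -EFAULT;
		copied = 1;
		goto restart;	/* redo validation from scratch */
	}

	/* ... apply relocations and submit the push buffer ... */
	drop_reservations();
	return 0;
}

int main(void)
{
	printf("resident: %d\n", submit("relocs", 7, 1));
	printf("faulted:  %d\n", submit("relocs", 7, 0));
	return 0;
}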
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
---
 drivers/gpu/drm/nouveau/nouveau_gem.c | 188 +++++++++++++++++++++-------------
 1 file changed, 119 insertions(+), 69 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index f32b712..41a4bf6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -464,8 +464,6 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
 	      uint64_t user_pbbo_ptr)
 {
 	struct nouveau_drm *drm = chan->drm;
-	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
-				(void __force __user *)(uintptr_t)user_pbbo_ptr;
 	struct nouveau_bo *nvbo;
 	int ret, relocs = 0;
 
@@ -499,7 +497,7 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
 			return ret;
 		}
 
-		if (nv_device(drm->device)->card_type < NV_50) {
+		if (nv_device(drm->device)->card_type < NV_50 && !relocs) {
 			if (nvbo->bo.offset == b->presumed.offset &&
 			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
 			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -507,32 +505,53 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
 			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
 				continue;
 
-			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
-				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
-			else
-				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
-			b->presumed.offset = nvbo->bo.offset;
-			b->presumed.valid = 0;
-			relocs++;
-
-			if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
-					     &b->presumed, sizeof(b->presumed)))
-				return -EFAULT;
+			relocs = 1;
 		}
 	}
 
 	return relocs;
 }
 
+static inline void *
+u_memcpya(uint64_t user, unsigned nmemb, unsigned size, unsigned inatomic)
+{
+	void *mem;
+	void __user *userptr = (void __force __user *)(uintptr_t)user;
+
+	mem = drm_malloc_ab(size, nmemb);
+	if (!mem)
+		return ERR_PTR(-ENOMEM);
+	size *= nmemb;
+
+	if (inatomic && (!access_ok(VERIFY_READ, userptr, size) ||
+	    __copy_from_user_inatomic(mem, userptr, size))) {
+		drm_free_large(mem);
+		return ERR_PTR(-EFAULT);
+	} else if (!inatomic && copy_from_user(mem, userptr, size)) {
+		drm_free_large(mem);
+		return ERR_PTR(-EFAULT);
+	}
+
+	return mem;
+}
+
+static int
+nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
+				struct drm_nouveau_gem_pushbuf *req,
+				struct drm_nouveau_gem_pushbuf_bo *bo,
+				struct drm_nouveau_gem_pushbuf_reloc *reloc);
+
 static int
 nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 			     struct drm_file *file_priv,
 			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
+			     struct drm_nouveau_gem_pushbuf *req,
 			     uint64_t user_buffers, int nr_buffers,
-			     struct validate_op *op, int *apply_relocs)
+			     struct validate_op *op, int *do_reloc)
 {
 	struct nouveau_cli *cli = nouveau_cli(file_priv);
 	int ret, relocs = 0;
+	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
 
 	INIT_LIST_HEAD(&op->vram_list);
 	INIT_LIST_HEAD(&op->gart_list);
@@ -541,19 +560,19 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 	if (nr_buffers == 0)
 		return 0;
 
+restart:
 	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
 	if (unlikely(ret)) {
 		if (ret != -ERESTARTSYS)
 			NV_ERROR(cli, "validate_init\n");
-		return ret;
+		goto err;
 	}
 
 	ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
 		if (ret != -ERESTARTSYS)
 			NV_ERROR(cli, "validate vram_list\n");
-		validate_fini(op, NULL);
-		return ret;
+		goto err_fini;
 	}
 	relocs += ret;
@@ -561,8 +580,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 	if (unlikely(ret < 0)) {
 		if (ret != -ERESTARTSYS)
 			NV_ERROR(cli, "validate gart_list\n");
-		validate_fini(op, NULL);
-		return ret;
+		goto err_fini;
 	}
 	relocs += ret;
@@ -570,58 +588,90 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 	if (unlikely(ret < 0)) {
 		if (ret != -ERESTARTSYS)
 			NV_ERROR(cli, "validate both_list\n");
-		validate_fini(op, NULL);
-		return ret;
+		goto err_fini;
 	}
 	relocs += ret;
 
+	if (relocs) {
+		if (!reloc)
+			reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc), 1);
+		if (IS_ERR(reloc)) {
+			validate_fini(op, NULL);
+
+			if (PTR_ERR(reloc) == -EFAULT) {
+				reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc), 0);
+				if (!IS_ERR(reloc))
+					goto restart;
+			}
+
+			return PTR_ERR(reloc);
+		}
+
+		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, pbbo, reloc);
+		if (ret) {
+			NV_ERROR(cli, "reloc apply: %d\n", ret);
+			goto err_fini;
+		}
+		drm_free_large(reloc);
+		*do_reloc = 1;
+	}
 
-	*apply_relocs = relocs;
 	return 0;
-}
 
-static inline void
-u_free(void *addr)
-{
-	if (!is_vmalloc_addr(addr))
-		kfree(addr);
-	else
-		vfree(addr);
+err_fini:
+	validate_fini(op, NULL);
+err:
+	drm_free_large(reloc);
+	return ret;
 }
 
-static inline void *
-u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
+static int
+nouveau_gem_pushbuf_reloc_copy_to_user(struct drm_nouveau_gem_pushbuf *req,
+				       struct drm_nouveau_gem_pushbuf_bo *bo)
 {
-	void *mem;
-	void __user *userptr = (void __force __user *)(uintptr_t)user;
-
-	size *= nmemb;
+	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
+		(void __force __user *)(uintptr_t)req->buffers;
+	unsigned i;
 
-	mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
-	if (!mem)
-		mem = vmalloc(size);
-	if (!mem)
-		return ERR_PTR(-ENOMEM);
+	for (i = 0; i < req->nr_buffers; ++i) {
+		struct drm_nouveau_gem_pushbuf_bo *b = &bo[i];
 
-	if (DRM_COPY_FROM_USER(mem, userptr, size)) {
-		u_free(mem);
-		return ERR_PTR(-EFAULT);
+		if (!b->presumed.valid &&
+		    copy_to_user(&upbbo[i].presumed, &b->presumed, sizeof(b->presumed)))
+			return -EFAULT;
 	}
-
-	return mem;
+	return 0;
 }
 
 static int
 nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 				struct drm_nouveau_gem_pushbuf *req,
-				struct drm_nouveau_gem_pushbuf_bo *bo)
+				struct drm_nouveau_gem_pushbuf_bo *bo,
+				struct drm_nouveau_gem_pushbuf_reloc *reloc)
 {
-	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
 	int ret = 0;
 	unsigned i;
 
-	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
-	if (IS_ERR(reloc))
-		return PTR_ERR(reloc);
+	for (i = 0; i < req->nr_buffers; ++i) {
+		struct drm_nouveau_gem_pushbuf_bo *b = &bo[i];
+		struct nouveau_bo *nvbo = (void *)(unsigned long)
+			bo[i].user_priv;
+
+		if (nvbo->bo.offset == b->presumed.offset &&
+		    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
+		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
+		     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART))) {
+			b->presumed.valid = 1;
+			continue;
+		}
+
+		if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+			b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
+		else
+			b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
+		b->presumed.offset = nvbo->bo.offset;
+		b->presumed.valid = 0;
+	}
 
 	for (i = 0; i < req->nr_relocs; i++) {
 		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
@@ -688,8 +738,6 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
 	}
-
-	u_free(reloc);
 	return ret;
 }
 
@@ -745,13 +793,13 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 		return nouveau_abi16_put(abi16, -EINVAL);
 	}
 
-	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
+	push = u_memcpya(req->push, req->nr_push, sizeof(*push), 0);
 	if (IS_ERR(push))
 		return nouveau_abi16_put(abi16, PTR_ERR(push));
 
-	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
+	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo), 0);
 	if (IS_ERR(bo)) {
-		u_free(push);
+		drm_free_large(push);
 		return nouveau_abi16_put(abi16, PTR_ERR(bo));
 	}
 
@@ -765,7 +813,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	}
 
 	/* Validate buffer list */
-	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
+	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req, req->buffers,
 					   req->nr_buffers, &op, &do_reloc);
 	if (ret) {
 		if (ret != -ERESTARTSYS)
@@ -773,15 +821,6 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 		goto out_prevalid;
 	}
 
-	/* Apply any relocations that are required */
-	if (do_reloc) {
-		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
-		if (ret) {
-			NV_ERROR(cli, "reloc apply: %d\n", ret);
-			goto out;
-		}
-	}
-
 	if (chan->dma.ib_max) {
 		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
 		if (ret) {
@@ -861,9 +900,20 @@ out:
 	validate_fini(&op, fence);
 	nouveau_fence_unref(&fence);
 
+	if (do_reloc && !ret) {
+		ret = nouveau_gem_pushbuf_reloc_copy_to_user(req, bo);
+		if (ret) {
+			NV_ERROR(cli, "error copying presumed back to userspace: %d\n", ret);
+			/*
+			 * XXX: We return -EFAULT, but command submission
+			 * has already been completed.
+			 */
+		}
+	}
+
 out_prevalid:
-	u_free(bo);
-	u_free(push);
+	drm_free_large(bo);
+	drm_free_large(push);
 
 out_next:
 	if (chan->dma.ib_max) {