From patchwork Fri Aug 22 03:12:03 2014
X-Patchwork-Submitter: Ben Widawsky
X-Patchwork-Id: 4760961
From: Ben Widawsky
To: Intel GFX
Cc: Ben Widawsky, Ben Widawsky
Date: Thu, 21 Aug 2014 20:12:03 -0700
Message-Id: <1408677155-1840-41-git-send-email-benjamin.widawsky@intel.com>
X-Mailer: git-send-email 2.0.4
In-Reply-To: <1408677155-1840-1-git-send-email-benjamin.widawsky@intel.com>
References: <1408677155-1840-1-git-send-email-benjamin.widawsky@intel.com>
Subject: [Intel-gfx] [PATCH 40/68] drm/i915: Always dma map page directory allocations

Similar to an earlier patch in this series, we can always map and unmap
page directories at the same points where we allocate and tear them
down. Page directory pages only exist on gen8+, so this should only
affect behavior on those platforms.
Signed-off-by: Ben Widawsky
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 79 +++++++++----------------------------
 1 file changed, 19 insertions(+), 60 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 205d5c6..094a82f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -303,21 +303,23 @@ err_out:
 	return ret;
 }
 
-static void __free_pd_single(struct i915_pagedir *pd)
+static void __free_pd_single(struct i915_pagedir *pd, struct drm_device *dev)
 {
+	i915_dma_unmap_single(pd, dev);
 	__free_page(pd->page);
 	kfree(pd);
 }
 
-#define free_pd_single(pd) do { \
+#define free_pd_single(pd, dev) do { \
 	if ((pd)->page) { \
-		__free_pd_single(pd); \
+		__free_pd_single(pd, dev); \
 	} \
 } while (0)
 
-static struct i915_pagedir *alloc_pd_single(void)
+static struct i915_pagedir *alloc_pd_single(struct drm_device *dev)
 {
 	struct i915_pagedir *pd;
+	int ret;
 
 	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
 	if (!pd)
@@ -329,6 +331,13 @@ static struct i915_pagedir *alloc_pd_single(void)
 		return ERR_PTR(-ENOMEM);
 	}
 
+	ret = i915_dma_map_px_single(pd, dev);
+	if (ret) {
+		__free_page(pd->page);
+		kfree(pd);
+		return ERR_PTR(ret);
+	}
+
 	return pd;
 }
 
@@ -493,30 +502,7 @@ static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 
 	for (i = 0; i < ppgtt->num_pd_pages; i++) {
 		gen8_free_page_tables(ppgtt->pdp.pagedir[i], ppgtt->base.dev);
-		free_pd_single(ppgtt->pdp.pagedir[i]);
-	}
-}
-
-static void gen8_ppgtt_dma_unmap_pages(struct i915_hw_ppgtt *ppgtt)
-{
-	struct drm_device *dev = ppgtt->base.dev;
-	int i, j;
-
-	for (i = 0; i < ppgtt->num_pd_pages; i++) {
-		/* TODO: In the future we'll support sparse mappings, so this
-		 * will have to change. */
-		if (!ppgtt->pdp.pagedir[i]->daddr)
-			continue;
-
-		i915_dma_unmap_single(ppgtt->pdp.pagedir[i], dev);
-
-		for (j = 0; j < I915_PDES_PER_PD; j++) {
-			struct i915_pagedir *pd = ppgtt->pdp.pagedir[i];
-			struct i915_pagetab *pt = pd->page_tables[j];
-			dma_addr_t addr = pt->daddr;
-			if (addr)
-				i915_dma_unmap_single(pt, dev);
-		}
+		free_pd_single(ppgtt->pdp.pagedir[i], ppgtt->base.dev);
 	}
 }
 
@@ -528,7 +514,6 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 	list_del(&vm->global_link);
 	drm_mm_takedown(&vm->mm);
 
-	gen8_ppgtt_dma_unmap_pages(ppgtt);
 	gen8_ppgtt_free(ppgtt);
 }
 
@@ -558,7 +543,7 @@ static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
 	int i;
 
 	for (i = 0; i < max_pdp; i++) {
-		ppgtt->pdp.pagedir[i] = alloc_pd_single();
+		ppgtt->pdp.pagedir[i] = alloc_pd_single(ppgtt->base.dev);
 		if (IS_ERR(ppgtt->pdp.pagedir[i]))
 			goto unwind_out;
 	}
@@ -570,7 +555,8 @@ static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
 
 unwind_out:
 	while (i--)
-		free_pd_single(ppgtt->pdp.pagedir[i]);
+		free_pd_single(ppgtt->pdp.pagedir[i],
+			       ppgtt->base.dev);
 
 	return -ENOMEM;
 }
@@ -598,19 +584,6 @@ err_out:
 	return ret;
 }
 
-static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
-					     const int pdpe)
-{
-	int ret;
-
-	ret = i915_dma_map_px_single(ppgtt->pdp.pagedir[pdpe],
-				     ppgtt->base.dev);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 /**
  * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
  * with a net effect resembling a 2-level page table in normal x86 terms. Each
@@ -636,16 +609,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 		return ret;
 
 	/*
-	 * 2. Create DMA mappings for the page directories and page tables.
-	 */
-	for (i = 0; i < max_pdp; i++) {
-		ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
-		if (ret)
-			goto bail;
-	}
-
-	/*
-	 * 3. Map all the page directory entires to point to the page tables
+	 * 2. Map all the page directory entires to point to the page tables
 	 * we've allocated.
 	 *
 	 * For now, the PPGTT helper functions all require that the PDEs are
@@ -681,11 +645,6 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 			 ppgtt->num_pd_entries,
 			 (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
 	return 0;
-
-bail:
-	gen8_ppgtt_dma_unmap_pages(ppgtt);
-	gen8_ppgtt_free(ppgtt);
-	return ret;
 }
 
 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
@@ -1063,7 +1022,7 @@ static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 	for (i = 0; i < ppgtt->num_pd_entries; i++)
 		free_pt_single(ppgtt->pd.page_tables[i], ppgtt->base.dev);
 
-	free_pd_single(&ppgtt->pd);
+	free_pd_single(&ppgtt->pd, ppgtt->base.dev);
 }
 
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
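
The i915_dma_map_px_single() and i915_dma_unmap_single() helpers called
above are introduced earlier in this series, so their bodies do not
appear in this patch. For readers without the rest of the series, here
is a minimal sketch of the map-at-alloc / unmap-at-free pairing the
patch relies on, written against the generic kernel DMA API; the struct
and function names below are illustrative stand-ins, not the driver's
actual helpers:

	/* Illustrative sketch only -- not the driver's actual helpers. */
	#include <linux/dma-mapping.h>
	#include <linux/mm.h>

	struct pd_sketch {
		struct page *page;
		dma_addr_t daddr;
	};

	/* Map the page directory page for device access at allocation time. */
	static int sketch_map_pd(struct pd_sketch *pd, struct device *dev)
	{
		pd->daddr = dma_map_page(dev, pd->page, 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, pd->daddr))
			return -ENOMEM;
		return 0;
	}

	/* Undo the mapping at teardown, before the backing page is freed. */
	static void sketch_unmap_pd(struct pd_sketch *pd, struct device *dev)
	{
		dma_unmap_page(dev, pd->daddr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	}

Pairing the mapping with the allocation is what lets the patch delete
the separate gen8_ppgtt_dma_unmap_pages() walk and the sparse-mapping
TODO it carried: once a page directory exists, its DMA mapping is valid
by construction, and the teardown path unmaps and frees it in one place.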