From patchwork Tue Mar 18 05:48:52 2014
From: Ben Widawsky <benjamin.widawsky@intel.com>
To: Intel GFX <intel-gfx@lists.freedesktop.org>
Date: Mon, 17 Mar 2014 22:48:52 -0700
Message-Id: <1395121738-29126-21-git-send-email-benjamin.widawsky@intel.com>
In-Reply-To: <1395121738-29126-1-git-send-email-benjamin.widawsky@intel.com>
References: <1395121738-29126-1-git-send-email-benjamin.widawsky@intel.com>
Subject: [Intel-gfx] [PATCH 20/26] drm/i915: Always dma map page directory allocations

Similar to the patch a few back in the series, we can always map and
unmap page directories when we do their allocation and teardown. Page
directory pages only exist on gen8+, so this should only affect
behavior on those platforms.
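For context, the two helpers this change leans on, i915_dma_map_px_single()
and i915_dma_unmap_single(), come from an earlier patch in the series and do
not appear in this diff. A minimal sketch of the contract assumed here, using
the era's pci_* DMA API; the real helpers are presumably generic over any
struct with a page/daddr pair (hence the "px" in the name), while this sketch
specializes them to i915_pagedir, and the actual bodies may differ:

	/* Sketch only -- the real helpers are defined earlier in this
	 * series. Assumes pd->page holds the directory's backing page
	 * and pd->daddr caches the bus address, as the hunks below
	 * rely on. */
	static int i915_dma_map_px_single(struct i915_pagedir *pd,
					  struct drm_device *dev)
	{
		/* Map the backing page for bidirectional DMA and stash
		 * the bus address where the GTT code expects it. */
		pd->daddr = pci_map_page(dev->pdev, pd->page, 0, PAGE_SIZE,
					 PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev, pd->daddr))
			return -ENOMEM;

		return 0;
	}

	static void i915_dma_unmap_single(struct i915_pagedir *pd,
					  struct drm_device *dev)
	{
		pci_unmap_page(dev->pdev, pd->daddr, PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);
	}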
Signed-off-by: Ben Widawsky <benjamin.widawsky@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 79 +++++++++----------------------
 1 file changed, 19 insertions(+), 60 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index abef33dd..ad2f2c5 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -283,21 +283,23 @@ err_out:
 	return ret;
 }
 
-static void __free_pd_single(struct i915_pagedir *pd)
+static void __free_pd_single(struct i915_pagedir *pd, struct drm_device *dev)
 {
+	i915_dma_unmap_single(pd, dev);
 	__free_page(pd->page);
 	kfree(pd);
 }
 
-#define free_pd_single(pd) do { \
+#define free_pd_single(pd, dev) do { \
 	if ((pd)->page) { \
-		__free_pd_single(pd); \
+		__free_pd_single(pd, dev); \
 	} \
 } while (0)
 
-static struct i915_pagedir *alloc_pd_single(void)
+static struct i915_pagedir *alloc_pd_single(struct drm_device *dev)
 {
 	struct i915_pagedir *pd;
+	int ret;
 
 	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
 	if (!pd)
@@ -309,6 +311,13 @@ static struct i915_pagedir *alloc_pd_single(void)
 		return ERR_PTR(-ENOMEM);
 	}
 
+	ret = i915_dma_map_px_single(pd, dev);
+	if (ret) {
+		__free_page(pd->page);
+		kfree(pd);
+		return ERR_PTR(ret);
+	}
+
 	return pd;
 }
 
@@ -466,30 +475,7 @@ static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 
 	for (i = 0; i < ppgtt->num_pd_pages; i++) {
 		gen8_free_page_tables(ppgtt->pdp.pagedir[i], ppgtt->base.dev);
-		free_pd_single(ppgtt->pdp.pagedir[i]);
-	}
-}
-
-static void gen8_ppgtt_dma_unmap_pages(struct i915_hw_ppgtt *ppgtt)
-{
-	struct drm_device *dev = ppgtt->base.dev;
-	int i, j;
-
-	for (i = 0; i < ppgtt->num_pd_pages; i++) {
-		/* TODO: In the future we'll support sparse mappings, so this
-		 * will have to change. */
-		if (!ppgtt->pdp.pagedir[i]->daddr)
-			continue;
-
-		i915_dma_unmap_single(ppgtt->pdp.pagedir[i], dev);
-
-		for (j = 0; j < I915_PDES_PER_PD; j++) {
-			struct i915_pagedir *pd = ppgtt->pdp.pagedir[i];
-			struct i915_pagetab *pt = pd->page_tables[j];
-			dma_addr_t addr = pt->daddr;
-			if (addr)
-				i915_dma_unmap_single(pt, dev);
-		}
+		free_pd_single(ppgtt->pdp.pagedir[i], ppgtt->base.dev);
 	}
 }
 
@@ -501,7 +487,6 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 	list_del(&vm->global_link);
 	drm_mm_takedown(&vm->mm);
 
-	gen8_ppgtt_dma_unmap_pages(ppgtt);
 	gen8_ppgtt_free(ppgtt);
 }
 
@@ -531,7 +516,7 @@ static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
 	int i;
 
 	for (i = 0; i < max_pdp; i++) {
-		ppgtt->pdp.pagedir[i] = alloc_pd_single();
+		ppgtt->pdp.pagedir[i] = alloc_pd_single(ppgtt->base.dev);
 		if (IS_ERR(ppgtt->pdp.pagedir[i]))
 			goto unwind_out;
 	}
@@ -543,7 +528,8 @@ static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
 
 unwind_out:
 	while (i--)
-		free_pd_single(ppgtt->pdp.pagedir[i]);
+		free_pd_single(ppgtt->pdp.pagedir[i],
+			       ppgtt->base.dev);
 
 	return -ENOMEM;
 }
@@ -571,19 +557,6 @@ err_out:
 	return ret;
 }
 
-static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
-					     const int pdpe)
-{
-	int ret;
-
-	ret = i915_dma_map_px_single(ppgtt->pdp.pagedir[pdpe],
-				     ppgtt->base.dev);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 /**
  * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
  * with a net effect resembling a 2-level page table in normal x86 terms. Each
@@ -609,16 +582,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 		return ret;
 
 	/*
-	 * 2. Create DMA mappings for the page directories and page tables.
-	 */
-	for (i = 0; i < max_pdp; i++) {
-		ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
-		if (ret)
-			goto bail;
-	}
-
-	/*
-	 * 3. Map all the page directory entires to point to the page tables
+	 * 2. Map all the page directory entires to point to the page tables
 	 * we've allocated.
 	 *
 	 * For now, the PPGTT helper functions all require that the PDEs are
@@ -652,11 +616,6 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 			 ppgtt->num_pd_entries,
 			 (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
 	return 0;
-
-bail:
-	gen8_ppgtt_dma_unmap_pages(ppgtt);
-	gen8_ppgtt_free(ppgtt);
-	return ret;
 }
 
 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
@@ -1034,7 +993,7 @@ static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 	for (i = 0; i < ppgtt->num_pd_entries; i++)
 		free_pt_single(ppgtt->pd.page_tables[i], ppgtt->base.dev);
 
-	free_pd_single(&ppgtt->pd);
+	free_pd_single(&ppgtt->pd, ppgtt->base.dev);
 }
 
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
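Worth noting: the removed gen8_ppgtt_dma_unmap_pages() had to tolerate
directories with no DMA mapping (its TODO about sparse mappings, and the
"if (!...->daddr) continue;" check). After this patch a page directory that
was successfully allocated is always mapped, so teardown could even assert
the invariant. A hypothetical variant of the patched macro, not part of the
patch, just to illustrate what the new invariant would permit:

	/* Hypothetical: with mapping tied to allocation, a zero daddr on
	 * a directory that still has a backing page would indicate a bug,
	 * so it could be flagged instead of silently skipped. */
	#define free_pd_single(pd, dev) do { \
		if ((pd)->page) { \
			WARN_ON((pd)->daddr == 0); \
			__free_pd_single(pd, dev); \
		} \
	} while (0)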