From patchwork Tue Oct  7 17:11:19 2014
X-Patchwork-Submitter: Michel Thierry
X-Patchwork-Id: 5047581
From: Michel Thierry <michel.thierry@intel.com>
To: intel-gfx@lists.freedesktop.org
Date: Tue, 7 Oct 2014 18:11:19 +0100
Message-Id: <1412701894-28905-24-git-send-email-michel.thierry@intel.com>
X-Mailer: git-send-email 2.0.3
In-Reply-To: <1412701894-28905-1-git-send-email-michel.thierry@intel.com>
References: <1412701894-28905-1-git-send-email-michel.thierry@intel.com>
Subject: [Intel-gfx] [RFC 23/38] drm/i915: Always dma map page directory allocations

From: Ben Widawsky

Similar to a patch a few back in the series, we can always map and unmap
page directories when we allocate and tear them down. Page directory
pages only exist on gen8+, so this should only affect behavior on those
platforms.
Signed-off-by: Ben Widawsky
Signed-off-by: Michel Thierry
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 79 +++++++++----------------------------
 1 file changed, 19 insertions(+), 60 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 3bb728f..54fbd87 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -310,21 +310,23 @@ err_out:
 	return ret;
 }
 
-static void __free_pd_single(struct i915_pagedir *pd)
+static void __free_pd_single(struct i915_pagedir *pd, struct drm_device *dev)
 {
+	i915_dma_unmap_single(pd, dev);
 	__free_page(pd->page);
 	kfree(pd);
 }
 
-#define free_pd_single(pd) do { \
+#define free_pd_single(pd, dev) do { \
 	if ((pd)->page) { \
-		__free_pd_single(pd); \
+		__free_pd_single(pd, dev); \
 	} \
 } while (0)
 
-static struct i915_pagedir *alloc_pd_single(void)
+static struct i915_pagedir *alloc_pd_single(struct drm_device *dev)
 {
 	struct i915_pagedir *pd;
+	int ret;
 
 	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
 	if (!pd)
@@ -336,6 +338,13 @@ static struct i915_pagedir *alloc_pd_single(void)
 		return ERR_PTR(-ENOMEM);
 	}
 
+	ret = i915_dma_map_px_single(pd, dev);
+	if (ret) {
+		__free_page(pd->page);
+		kfree(pd);
+		return ERR_PTR(ret);
+	}
+
 	return pd;
 }
 
@@ -492,30 +501,7 @@ static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 
 	for (i = 0; i < ppgtt->num_pd_pages; i++) {
 		gen8_free_page_tables(ppgtt->pdp.pagedir[i], ppgtt->base.dev);
-		free_pd_single(ppgtt->pdp.pagedir[i]);
-	}
-}
-
-static void gen8_ppgtt_dma_unmap_pages(struct i915_hw_ppgtt *ppgtt)
-{
-	struct drm_device *dev = ppgtt->base.dev;
-	int i, j;
-
-	for (i = 0; i < ppgtt->num_pd_pages; i++) {
-		/* TODO: In the future we'll support sparse mappings, so this
-		 * will have to change. */
-		if (!ppgtt->pdp.pagedir[i]->daddr)
-			continue;
-
-		i915_dma_unmap_single(ppgtt->pdp.pagedir[i], dev);
-
-		for (j = 0; j < I915_PDES_PER_PD; j++) {
-			struct i915_pagedir *pd = ppgtt->pdp.pagedir[i];
-			struct i915_pagetab *pt = pd->page_tables[j];
-			dma_addr_t addr = pt->daddr;
-			if (addr)
-				i915_dma_unmap_single(pt, dev);
-		}
+		free_pd_single(ppgtt->pdp.pagedir[i], ppgtt->base.dev);
 	}
 }
 
@@ -524,7 +510,6 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
 
-	gen8_ppgtt_dma_unmap_pages(ppgtt);
 	gen8_ppgtt_free(ppgtt);
 }
 
@@ -554,7 +539,7 @@ static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
 	int i;
 
 	for (i = 0; i < max_pdp; i++) {
-		ppgtt->pdp.pagedir[i] = alloc_pd_single();
+		ppgtt->pdp.pagedir[i] = alloc_pd_single(ppgtt->base.dev);
 		if (IS_ERR(ppgtt->pdp.pagedir[i]))
 			goto unwind_out;
 	}
@@ -566,7 +551,8 @@ static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
 
 unwind_out:
 	while (i--)
-		free_pd_single(ppgtt->pdp.pagedir[i]);
+		free_pd_single(ppgtt->pdp.pagedir[i],
+			       ppgtt->base.dev);
 
 	return -ENOMEM;
 }
@@ -594,19 +580,6 @@ err_out:
 	return ret;
 }
 
-static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
-					     const int pdpe)
-{
-	int ret;
-
-	ret = i915_dma_map_px_single(ppgtt->pdp.pagedir[pdpe],
-				     ppgtt->base.dev);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 /**
  * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
  * with a net effect resembling a 2-level page table in normal x86 terms. Each
@@ -632,16 +605,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 		return ret;
 
 	/*
-	 * 2. Create DMA mappings for the page directories and page tables.
-	 */
-	for (i = 0; i < max_pdp; i++) {
-		ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
-		if (ret)
-			goto bail;
-	}
-
-	/*
-	 * 3. Map all the page directory entires to point to the page tables
+	 * 2. Map all the page directory entires to point to the page tables
 	 * we've allocated.
 	 *
 	 * For now, the PPGTT helper functions all require that the PDEs are
@@ -676,11 +640,6 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 			 ppgtt->num_pd_entries,
 			 (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
 	return 0;
-
-bail:
-	gen8_ppgtt_dma_unmap_pages(ppgtt);
-	gen8_ppgtt_free(ppgtt);
-	return ret;
 }
 
 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
@@ -975,7 +934,7 @@ static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 	for (i = 0; i < ppgtt->num_pd_entries; i++)
 		free_pt_single(ppgtt->pd.page_tables[i], ppgtt->base.dev);
 
-	free_pd_single(&ppgtt->pd);
+	free_pd_single(&ppgtt->pd, ppgtt->base.dev);
 }
 
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
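
[Editor's sketch, not part of the patch] For readers skimming the series, here is a minimal illustration of the lifetime rule this change enforces, written against the generic kernel DMA API rather than the i915 helpers. struct pd_demo, pd_demo_alloc() and pd_demo_free() are hypothetical stand-ins for i915_pagedir, alloc_pd_single() and free_pd_single(); dma_map_page(), dma_mapping_error() and dma_unmap_page() are real kernel calls, roughly what the series' i915_dma_map_px_single()/i915_dma_unmap_single() wrappers boil down to.

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/slab.h>

/* Hypothetical stand-in for struct i915_pagedir. */
struct pd_demo {
	struct page *page;
	dma_addr_t daddr;
};

/* Mirrors alloc_pd_single(): the DMA mapping is created at allocation
 * time, so any pd successfully returned is already device-visible. */
static struct pd_demo *pd_demo_alloc(struct device *dev)
{
	struct pd_demo *pd = kzalloc(sizeof(*pd), GFP_KERNEL);

	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->page = alloc_page(GFP_KERNEL);
	if (!pd->page) {
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	pd->daddr = dma_map_page(dev, pd->page, 0, PAGE_SIZE,
				 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pd->daddr)) {
		__free_page(pd->page);
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	return pd;
}

/* Mirrors free_pd_single(): the unmap lives on the free path, so
 * teardown never needs a separate "unmap everything" walk. */
static void pd_demo_free(struct pd_demo *pd, struct device *dev)
{
	dma_unmap_page(dev, pd->daddr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	__free_page(pd->page);
	kfree(pd);
}

This pairing is exactly why the patch can delete gen8_ppgtt_setup_page_directories(), gen8_ppgtt_dma_unmap_pages() and the bail: path in gen8_ppgtt_init(): once map/unmap travel with alloc_pd_single()/free_pd_single(), the ordinary unwind and free paths already do the right thing.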