[RFC,23/38] drm/i915: Always dma map page directory allocations

Message ID 1412701894-28905-24-git-send-email-michel.thierry@intel.com (mailing list archive)
State New, archived

Commit Message

Michel Thierry Oct. 7, 2014, 5:11 p.m. UTC
From: Ben Widawsky <benjamin.widawsky@intel.com>

Similar to an earlier patch in the series, we can always map and unmap
page directories when we do their allocation and teardown. Page
directory pages only exist on gen8+, so this should only affect behavior
on those platforms.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Michel Thierry <michel.thierry@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 79 +++++++++----------------------------
 1 file changed, 19 insertions(+), 60 deletions(-)
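
The i915_dma_map_px_single()/i915_dma_unmap_single() helpers used below
are introduced earlier in the series. As a rough, illustrative sketch
(the function names here are stand-ins, not the series' actual helpers),
the map-at-alloc / unmap-at-free lifecycle they implement over the core
DMA API looks like this:

/*
 * Illustrative stand-ins for the series' single-page map/unmap helpers.
 */
static int map_px_page(struct device *dev, struct page *page,
		       dma_addr_t *daddr)
{
	/* Map the one backing page for bidirectional GPU access. */
	*daddr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *daddr))
		return -ENOMEM;
	return 0;
}

static void unmap_px_page(struct device *dev, dma_addr_t daddr)
{
	dma_unmap_page(dev, daddr, PAGE_SIZE, DMA_BIDIRECTIONAL);
}

Folding the mapping into alloc_pd_single() means teardown no longer
needs a separate unmap pass, which is what lets
gen8_ppgtt_dma_unmap_pages() go away below.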

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 3bb728f..54fbd87 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -310,21 +310,23 @@  err_out:
 	return ret;
 }
 
-static void __free_pd_single(struct i915_pagedir *pd)
+static void __free_pd_single(struct i915_pagedir *pd, struct drm_device *dev)
 {
+	i915_dma_unmap_single(pd, dev);
 	__free_page(pd->page);
 	kfree(pd);
 }
 
-#define free_pd_single(pd) do { \
+#define free_pd_single(pd, dev) do { \
 	if ((pd)->page) { \
-		__free_pd_single(pd); \
+		__free_pd_single(pd, dev); \
 	} \
 } while (0)
 
-static struct i915_pagedir *alloc_pd_single(void)
+static struct i915_pagedir *alloc_pd_single(struct drm_device *dev)
 {
 	struct i915_pagedir *pd;
+	int ret;
 
 	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
 	if (!pd)
@@ -336,6 +338,13 @@  static struct i915_pagedir *alloc_pd_single(void)
 		return ERR_PTR(-ENOMEM);
 	}
 
+	ret = i915_dma_map_px_single(pd, dev);
+	if (ret) {
+		__free_page(pd->page);
+		kfree(pd);
+		return ERR_PTR(ret);
+	}
+
 	return pd;
 }
 
@@ -492,30 +501,7 @@  static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 
 	for (i = 0; i < ppgtt->num_pd_pages; i++) {
 		gen8_free_page_tables(ppgtt->pdp.pagedir[i], ppgtt->base.dev);
-		free_pd_single(ppgtt->pdp.pagedir[i]);
-	}
-}
-
-static void gen8_ppgtt_dma_unmap_pages(struct i915_hw_ppgtt *ppgtt)
-{
-	struct drm_device *dev = ppgtt->base.dev;
-	int i, j;
-
-	for (i = 0; i < ppgtt->num_pd_pages; i++) {
-		/* TODO: In the future we'll support sparse mappings, so this
-		 * will have to change. */
-		if (!ppgtt->pdp.pagedir[i]->daddr)
-			continue;
-
-		i915_dma_unmap_single(ppgtt->pdp.pagedir[i], dev);
-
-		for (j = 0; j < I915_PDES_PER_PD; j++) {
-			struct i915_pagedir *pd = ppgtt->pdp.pagedir[i];
-			struct i915_pagetab *pt =  pd->page_tables[j];
-			dma_addr_t addr = pt->daddr;
-			if (addr)
-				i915_dma_unmap_single(pt, dev);
-		}
+		free_pd_single(ppgtt->pdp.pagedir[i], ppgtt->base.dev);
 	}
 }
 
@@ -524,7 +510,6 @@  static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
 
-	gen8_ppgtt_dma_unmap_pages(ppgtt);
 	gen8_ppgtt_free(ppgtt);
 }
 
@@ -554,7 +539,7 @@  static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
 	int i;
 
 	for (i = 0; i < max_pdp; i++) {
-		ppgtt->pdp.pagedir[i] = alloc_pd_single();
+		ppgtt->pdp.pagedir[i] = alloc_pd_single(ppgtt->base.dev);
 		if (IS_ERR(ppgtt->pdp.pagedir[i]))
 			goto unwind_out;
 	}
@@ -566,7 +551,8 @@  static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
 
 unwind_out:
 	while (i--)
-		free_pd_single(ppgtt->pdp.pagedir[i]);
+		free_pd_single(ppgtt->pdp.pagedir[i],
+			       ppgtt->base.dev);
 
 	return -ENOMEM;
 }
@@ -594,19 +580,6 @@  err_out:
 	return ret;
 }
 
-static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
-					     const int pdpe)
-{
-	int ret;
-
-	ret = i915_dma_map_px_single(ppgtt->pdp.pagedir[pdpe],
-				     ppgtt->base.dev);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 /**
  * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
  * with a net effect resembling a 2-level page table in normal x86 terms. Each
@@ -632,16 +605,7 @@  static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 		return ret;
 
 	/*
-	 * 2. Create DMA mappings for the page directories and page tables.
-	 */
-	for (i = 0; i < max_pdp; i++) {
-		ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
-		if (ret)
-			goto bail;
-	}
-
-	/*
-	 * 3. Map all the page directory entires to point to the page tables
+	 * 2. Map all the page directory entries to point to the page tables
 	 * we've allocated.
 	 *
 	 * For now, the PPGTT helper functions all require that the PDEs are
@@ -676,11 +640,6 @@  static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 			 ppgtt->num_pd_entries,
 			 (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
 	return 0;
-
-bail:
-	gen8_ppgtt_dma_unmap_pages(ppgtt);
-	gen8_ppgtt_free(ppgtt);
-	return ret;
 }
 
 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
@@ -975,7 +934,7 @@  static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 	for (i = 0; i < ppgtt->num_pd_entries; i++)
 		free_pt_single(ppgtt->pd.page_tables[i], ppgtt->base.dev);
 
-	free_pd_single(&ppgtt->pd);
+	free_pd_single(&ppgtt->pd, ppgtt->base.dev);
 }
 
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
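
A small style note on the new error path in alloc_pd_single(): the patch
open-codes the cleanup (__free_page() followed by kfree()). The usual
kernel idiom is a goto-based unwind; a behavior-preserving sketch of the
same function (the page-allocation GFP flags are assumed, since that
part of the allocator is outside this hunk):

static struct i915_pagedir *alloc_pd_single(struct drm_device *dev)
{
	struct i915_pagedir *pd;
	int ret;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->page = alloc_page(GFP_KERNEL); /* flags assumed */
	if (!pd->page) {
		ret = -ENOMEM;
		goto err_free_pd;
	}

	ret = i915_dma_map_px_single(pd, dev);
	if (ret)
		goto err_free_page;

	return pd;

err_free_page:
	__free_page(pd->page);
err_free_pd:
	kfree(pd);
	return ERR_PTR(ret);
}

The unwind labels keep each failure point paired with exactly the
cleanup it needs, so later allocation steps can be added without
duplicating the earlier frees.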