@@ -523,6 +523,36 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
}
}
+static void __gen8_do_map_pt(gen8_ppgtt_pde_t *pde,
+ struct i915_pagetab *pt,
+ struct drm_device *dev)
+{
+ gen8_ppgtt_pde_t entry =
+ gen8_pde_encode(dev, pt->daddr, I915_CACHE_LLC);
+ *pde = entry;
+}
+
+/* It's likely we'll map more than one pagetable at a time. This function
+ * maps the whole range under a single kmap_atomic of the page directory,
+ * but is otherwise equivalent to calling __gen8_do_map_pt once per PDE. */
+static void gen8_map_pagetable_range(struct i915_pagedir *pd,
+ uint64_t start,
+ uint64_t length,
+ struct drm_device *dev)
+{
+ gen8_ppgtt_pde_t *pagedir = kmap_atomic(pd->page);
+ struct i915_pagetab *pt;
+ uint64_t temp, pde;
+
+ gen8_for_each_pde(pt, pd, start, length, temp, pde)
+ __gen8_do_map_pt(pagedir + pde, pt, dev);
+
+ if (!HAS_LLC(dev))
+ drm_clflush_virt_range(pagedir, PAGE_SIZE);
+
+ kunmap_atomic(pagedir);
+}
+
static void gen8_teardown_va_range(struct i915_address_space *vm,
uint64_t start, uint64_t length)
{
@@ -547,9 +577,6 @@ static void gen8_teardown_va_range(struct i915_address_space *vm,
static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
{
- trace_i915_va_teardown(&ppgtt->base,
- ppgtt->base.start, ppgtt->base.total,
- VM_TO_TRACE_NAME(&ppgtt->base));
gen8_teardown_va_range(&ppgtt->base,
ppgtt->base.start, ppgtt->base.total);
}
@@ -615,11 +642,14 @@ unwind_out:
return -ENOMEM;
}
-static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
- uint64_t start,
- uint64_t length)
+static int gen8_alloc_va_range(struct i915_address_space *vm,
+ uint64_t start,
+ uint64_t length)
{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
struct i915_pagedir *pd;
+ const uint64_t orig_start = start;
uint64_t temp;
uint32_t pdpe;
int ret;
@@ -638,9 +668,8 @@ static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
return 0;
- /* TODO: Check this for all cases */
err_out:
- gen8_ppgtt_free(ppgtt);
+ gen8_teardown_va_range(vm, orig_start, start);
return ret;
}
@@ -650,59 +679,37 @@ err_out:
* PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
* space.
*
- * FIXME: split allocation into smaller pieces. For now we only ever do this
- * once, but with full PPGTT, the multiple contiguous allocations will be bad.
- * TODO: Do something with the size parameter
*/
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
{
- const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
- int i, j, ret;
-
- if (size % (1<<30))
- DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
+ struct i915_pagedir *pd;
+ uint64_t temp, start = 0;
+ const uint64_t orig_length = size;
+ uint32_t pdpe;
+ int ret;
ppgtt->base.start = 0;
ppgtt->base.total = size;
+ ppgtt->base.clear_range = gen8_ppgtt_clear_range;
+ ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
+ ppgtt->base.cleanup = gen8_ppgtt_cleanup;
+ ppgtt->switch_mm = gen8_mm_switch;
ppgtt->scratch_pd = alloc_pt_scratch(ppgtt->base.dev);
if (IS_ERR(ppgtt->scratch_pd))
return PTR_ERR(ppgtt->scratch_pd);
- /* 1. Do all our allocations for page directories and page tables. */
- ret = gen8_ppgtt_alloc(ppgtt, ppgtt->base.start, ppgtt->base.total);
+ ret = gen8_alloc_va_range(&ppgtt->base, start, size);
if (ret) {
free_pt_scratch(ppgtt->scratch_pd, ppgtt->base.dev);
return ret;
}
- /*
- * 2. Map all the page directory entires to point to the page tables
- * we've allocated.
- *
- * For now, the PPGTT helper functions all require that the PDEs are
- * plugged in correctly. So we do that now/here. For aliasing PPGTT, we
- * will never need to touch the PDEs again.
- */
- for (i = 0; i < max_pdp; i++) {
- struct i915_pagedir *pd = ppgtt->pdp.pagedirs[i];
- gen8_ppgtt_pde_t *pd_vaddr;
- pd_vaddr = kmap_atomic(ppgtt->pdp.pagedirs[i]->page);
- for (j = 0; j < I915_PDES_PER_PD; j++) {
- struct i915_pagetab *pt = pd->page_tables[j];
- dma_addr_t addr = pt->daddr;
- pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
- I915_CACHE_LLC);
- }
- if (!HAS_LLC(ppgtt->base.dev))
- drm_clflush_virt_range(pd_vaddr, PAGE_SIZE);
- kunmap_atomic(pd_vaddr);
- }
+ start = 0;
+ size = orig_length;
- ppgtt->switch_mm = gen8_mm_switch;
- ppgtt->base.clear_range = gen8_ppgtt_clear_range;
- ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
- ppgtt->base.cleanup = gen8_ppgtt_cleanup;
+ gen8_for_each_pdpe(pd, &ppgtt->pdp, start, size, temp, pdpe)
+ gen8_map_pagetable_range(pd, start, size, ppgtt->base.dev);
return 0;
}