@@ -576,6 +576,25 @@ static void gen8_map_pagetable_range(struct i915_pagedir *pd,
 	kunmap_atomic(pagedir);
 }
 
+static void gen8_map_pagedir(struct i915_pagedir *pd,
+			     struct i915_pagetab *pt,
+			     int entry,
+			     struct drm_device *dev)
+{
+	gen8_ppgtt_pde_t *pagedir = kmap_atomic(pd->page);
+	__gen8_do_map_pt(pagedir + entry, pt, dev);
+	kunmap_atomic(pagedir);
+}
+
+static void gen8_unmap_pagetable(struct i915_hw_ppgtt *ppgtt,
+				 struct i915_pagedir *pd,
+				 int pde)
+{
+	pd->page_tables[pde] = NULL;
+	WARN_ON(!test_and_clear_bit(pde, pd->used_pdes));
+	gen8_map_pagedir(pd, ppgtt->scratch_pt, pde, ppgtt->base.dev);
+}
+
 static void gen8_teardown_va_range(struct i915_address_space *vm,
 				   uint64_t start, uint64_t length)
 {
@@ -621,8 +640,10 @@ static void gen8_teardown_va_range(struct i915_address_space *vm,
 			if (bitmap_empty(pt->used_ptes, GEN8_PTES_PER_PT)) {
 				free_pt_single(pt, vm->dev);
-				pd->page_tables[pde] = NULL;
-				WARN_ON(!test_and_clear_bit(pde, pd->used_pdes));
+				/* This may be nixed later. Optimize? */
+				gen8_unmap_pagetable(ppgtt, pd, pde);
+			} else {
+				gen8_ppgtt_clear_range(vm, pd_start, pd_len, true);
 			}
 		}
This is probably not required, since BDW is hopefully a bit more robust than
previous generations. Note also that scratch will not exist for every entry
within the page table structure; doing so would waste an extraordinary amount
of space once we move to 4-level page tables. Therefore, the scratch
pages/tables will only be pointed to by page tables which have fewer than all
of their entries filled. I wrote the patch while debugging, so I figured why
not put it in the series.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 25 +++++++++++++++++++++++--
 1 file changed, 23 insertions(+), 2 deletions(-)
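For readers following the series, here is a minimal, self-contained sketch of the idea behind gen8_unmap_pagetable() above: when a page table becomes empty it is freed, and its page-directory entry is rewritten to reference a shared scratch table so the hardware never walks a stale pointer. The struct layout, the pde_encode() helper, and the constants below are simplified placeholders for illustration, not the i915 definitions.

```c
#include <stdint.h>
#include <stdlib.h>

#define ENTRIES_PER_TABLE 512

/* Simplified stand-ins for the driver structures. */
struct page_table {
	uint64_t phys_addr;	/* address of the backing page */
};

struct page_dir {
	uint64_t entries[ENTRIES_PER_TABLE];		/* hardware-visible PDEs */
	struct page_table *tables[ENTRIES_PER_TABLE];	/* software shadow pointers */
};

/* Hypothetical PDE encoding: physical address plus a "present" bit. */
static uint64_t pde_encode(uint64_t addr)
{
	return addr | 1;
}

/*
 * Mirror of the patch's teardown flow: free the now-empty table, drop the
 * software pointer, and repoint the PDE at a scratch table instead of
 * leaving a dangling reference to freed memory.
 */
static void unmap_pagetable(struct page_dir *pd, int pde,
			    struct page_table *scratch)
{
	free(pd->tables[pde]);
	pd->tables[pde] = NULL;
	pd->entries[pde] = pde_encode(scratch->phys_addr);
}
```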