@@ -817,9 +817,9 @@ static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen8_pte_t *pt_vaddr;
- unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
- unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
- unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
+ unsigned pdpe = gen8_pdpe_index(start);
+ unsigned pde = gen8_pde_index(start);
+ unsigned pte = gen8_pte_index(start);
unsigned num_entries = length >> PAGE_SHIFT;
unsigned last_pte, i;
@@ -869,12 +869,24 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- struct i915_page_directory_pointer *pdp = &ppgtt->pdp; /* FIXME: 48b */
-
gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
I915_CACHE_LLC, use_scratch);
- gen8_ppgtt_clear_pte_range(vm, pdp, start, length, scratch_pte);
+ if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
+ gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
+ scratch_pte);
+ } else {
+ uint64_t templ4, pml4e;
+ struct i915_page_directory_pointer *pdp;
+
+ gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) {
+ uint64_t pdp_len = gen8_clamp_pdp(start, length);
+ uint64_t pdp_start = start;
+
+ gen8_ppgtt_clear_pte_range(vm, pdp, pdp_start, pdp_len,
+ scratch_pte);
+ }
+ }
}
static void
@@ -887,9 +899,9 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen8_pte_t *pt_vaddr;
- unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
- unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
- unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
+ unsigned pdpe = gen8_pdpe_index(start);
+ unsigned pde = gen8_pde_index(start);
+ unsigned pte = gen8_pte_index(start);
pt_vaddr = NULL;
@@ -907,7 +919,8 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
kunmap_px(ppgtt, pt_vaddr);
pt_vaddr = NULL;
if (++pde == I915_PDES) {
- pdpe++;
+ if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
+ break;
pde = 0;
}
pte = 0;
@@ -926,11 +939,25 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- struct i915_page_directory_pointer *pdp = &ppgtt->pdp; /* FIXME: 48b */
struct sg_page_iter sg_iter;
__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
- gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter, start, cache_level);
+
+ if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
+ gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
+ cache_level);
+ } else {
+ struct i915_page_directory_pointer *pdp;
+ uint64_t templ4, pml4e;
+ uint64_t length = (uint64_t)sg_nents(pages->sgl) << PAGE_SHIFT;
+
+ gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) {
+ uint64_t pdp_start = start;
+
+ gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
+ pdp_start, cache_level);
+ }
+ }
}
static void gen8_free_page_tables(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -472,6 +472,17 @@ static inline uint32_t gen6_pde_index(uint32_t addr)
#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \
gen8_for_each_pdpe_e(pd, pdp, start, length, temp, iter, I915_PDPES_PER_PDP(dev))
+/* Clamp length to the next page_directory pointer boundary */
+static inline uint64_t gen8_clamp_pdp(uint64_t start, uint64_t length)
+{
+ uint64_t next_pdp = ALIGN(start + 1, 1ULL << GEN8_PML4E_SHIFT);
+
+ if (next_pdp > (start + length))
+ return length;
+
+ return next_pdp - start;
+}
+
static inline uint32_t gen8_pte_index(uint64_t address)
{
return i915_pte_index(address, GEN8_PDE_SHIFT);
When 48b is enabled, gen8_ppgtt_insert_entries needs to read the Page Map
Level 4 (PML4), before it selects which Page Directory Pointer (PDP) it
will write to. Similarly, gen8_ppgtt_clear_range needs to get the correct
PDP/PD range.

This patch was inspired by Ben's "Depend exclusively on map and
unmap_vma".

v2: Rebase after s/page_tables/page_table/.
v3: Remove unnecessary pdpe loop in gen8_ppgtt_clear_range_4lvl and use
clamp_pdp in gen8_ppgtt_insert_entries (Akash).
v4: Merge gen8_ppgtt_clear_range_4lvl into gen8_ppgtt_clear_range to
maintain symmetry with gen8_ppgtt_insert_entries (Akash).
v5: Do not mix pages and bytes in insert_entries (Akash).
v6: Prevent overflow in sg_nents << PAGE_SHIFT, when inserting 4GB at
once.
v7: Rebase after Mika's ppgtt cleanup / scratch merge patch series.
Use gen8_px_index functions, and remove unnecessary number of pages
parameter in insert_pte_entries.

Cc: Akash Goel <akash.goel@intel.com>
Signed-off-by: Michel Thierry <michel.thierry@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 51 ++++++++++++++++++++++++++++---------
 drivers/gpu/drm/i915/i915_gem_gtt.h | 11 ++++++++
 2 files changed, 50 insertions(+), 12 deletions(-)
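For illustration only (not part of the patch): a minimal user-space sketch,
assuming the usual gen8 4-level layout (12/21/30/39-bit shifts, 9-bit indices
per level), of how a 48b GPU virtual address decomposes into PML4E/PDPE/PDE/PTE
indices and how a length is clamped to the next PDP (512 GiB) boundary,
mirroring gen8_clamp_pdp above. All sketch_* names are hypothetical and not
taken from the driver.

/*
 * Illustrative sketch only. Assumes the usual gen8 4-level layout
 * (page 12, PDE 21, PDPE 30, PML4E 39 bit shifts, 9-bit indices);
 * the sketch_* names are hypothetical and not part of i915.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PTE_SHIFT	12
#define SKETCH_PDE_SHIFT	21
#define SKETCH_PDPE_SHIFT	30
#define SKETCH_PML4E_SHIFT	39
#define SKETCH_INDEX_MASK	0x1ff	/* 9 bits per level */

/* Index into one level of the 4-level page-table tree for a given address */
static unsigned int sketch_index(uint64_t addr, unsigned int shift)
{
	return (addr >> shift) & SKETCH_INDEX_MASK;
}

/* Clamp length so [start, start + length) stays inside one PDP (512 GiB span) */
static uint64_t sketch_clamp_pdp(uint64_t start, uint64_t length)
{
	uint64_t span = 1ULL << SKETCH_PML4E_SHIFT;
	uint64_t next_pdp = (start + span) & ~(span - 1);

	if (next_pdp > start + length)
		return length;

	return next_pdp - start;
}

int main(void)
{
	uint64_t start = 1020ULL << 30;	/* 1020 GiB, 4 GiB below a PDP boundary */
	uint64_t length = 8ULL << 30;	/* 8 GiB, so the range crosses into the next PDP */

	printf("pml4e=%u pdpe=%u pde=%u pte=%u\n",
	       sketch_index(start, SKETCH_PML4E_SHIFT),
	       sketch_index(start, SKETCH_PDPE_SHIFT),
	       sketch_index(start, SKETCH_PDE_SHIFT),
	       sketch_index(start, SKETCH_PTE_SHIFT));

	/* Only the first 4 GiB belong to this PDP; the rest needs the next PML4 entry */
	printf("clamped length: %llu GiB\n",
	       (unsigned long long)(sketch_clamp_pdp(start, length) >> 30));
	return 0;
}

This is the same split the patch performs: gen8_ppgtt_clear_range walks each PDP
under the PML4 with gen8_for_each_pml4e and clamps the per-PDP length before
calling gen8_ppgtt_clear_pte_range.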