Message ID | 1438187043-34267-11-git-send-email-michel.thierry@intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Reviewed the patch & it looks fine. Reviewed-by: "Akash Goel <akash.goel@intel.com>" On 7/29/2015 9:53 PM, Michel Thierry wrote: > When 48b is enabled, gen8_ppgtt_insert_entries needs to read the Page Map > Level 4 (PML4), before it selects which Page Directory Pointer (PDP) > it will write to. > > Similarly, gen8_ppgtt_clear_range needs to get the correct PDP/PD range. > > This patch was inspired by Ben's "Depend exclusively on map and > unmap_vma". > > v2: Rebase after s/page_tables/page_table/. > v3: Remove unnecessary pdpe loop in gen8_ppgtt_clear_range_4lvl and use > clamp_pdp in gen8_ppgtt_insert_entries (Akash). > v4: Merge gen8_ppgtt_clear_range_4lvl into gen8_ppgtt_clear_range to > maintain symmetry with gen8_ppgtt_insert_entries (Akash). > v5: Do not mix pages and bytes in insert_entries (Akash). > v6: Prevent overflow in sg_nents << PAGE_SHIFT, when inserting 4GB at > once. > v7: Rebase after Mika's ppgtt cleanup / scratch merge patch series. > Use gen8_px_index functions, and remove unnecessary number of pages > parameter in insert_pte_entries. > v8: Change gen8_ppgtt_clear_pte_range to stop at PDP boundary, instead of > adding and extra clamp function; remove unnecessary pdp_start/pdp_len > variables (Akash). > v9: pages->orig_nents instead of sg_nents(pages->sgl) to get the > length (Akash). 
> > Cc: Akash Goel <akash.goel@intel.com> > Signed-off-by: Michel Thierry <michel.thierry@intel.com> > --- > drivers/gpu/drm/i915/i915_gem_gtt.c | 49 +++++++++++++++++++++++++++---------- > 1 file changed, 36 insertions(+), 13 deletions(-) > > diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c > index 7c024e98..7070d42 100644 > --- a/drivers/gpu/drm/i915/i915_gem_gtt.c > +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c > @@ -687,9 +687,9 @@ static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm, > struct i915_hw_ppgtt *ppgtt = > container_of(vm, struct i915_hw_ppgtt, base); > gen8_pte_t *pt_vaddr; > - unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK; > - unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK; > - unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK; > + unsigned pdpe = gen8_pdpe_index(start); > + unsigned pde = gen8_pde_index(start); > + unsigned pte = gen8_pte_index(start); > unsigned num_entries = length >> PAGE_SHIFT; > unsigned last_pte, i; > > @@ -725,7 +725,8 @@ static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm, > > pte = 0; > if (++pde == I915_PDES) { > - pdpe++; > + if (++pdpe == I915_PDPES_PER_PDP(vm->dev)) > + break; > pde = 0; > } > } > @@ -738,12 +739,21 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm, > { > struct i915_hw_ppgtt *ppgtt = > container_of(vm, struct i915_hw_ppgtt, base); > - struct i915_page_directory_pointer *pdp = &ppgtt->pdp; /* FIXME: 48b */ > - > gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page), > I915_CACHE_LLC, use_scratch); > > - gen8_ppgtt_clear_pte_range(vm, pdp, start, length, scratch_pte); > + if (!USES_FULL_48BIT_PPGTT(vm->dev)) { > + gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length, > + scratch_pte); > + } else { > + uint64_t templ4, pml4e; > + struct i915_page_directory_pointer *pdp; > + > + gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) { > + 
gen8_ppgtt_clear_pte_range(vm, pdp, start, length, > + scratch_pte); > + } > + } > } > > static void > @@ -756,9 +766,9 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm, > struct i915_hw_ppgtt *ppgtt = > container_of(vm, struct i915_hw_ppgtt, base); > gen8_pte_t *pt_vaddr; > - unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK; > - unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK; > - unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK; > + unsigned pdpe = gen8_pdpe_index(start); > + unsigned pde = gen8_pde_index(start); > + unsigned pte = gen8_pte_index(start); > > pt_vaddr = NULL; > > @@ -776,7 +786,8 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm, > kunmap_px(ppgtt, pt_vaddr); > pt_vaddr = NULL; > if (++pde == I915_PDES) { > - pdpe++; > + if (++pdpe == I915_PDPES_PER_PDP(vm->dev)) > + break; > pde = 0; > } > pte = 0; > @@ -795,11 +806,23 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, > { > struct i915_hw_ppgtt *ppgtt = > container_of(vm, struct i915_hw_ppgtt, base); > - struct i915_page_directory_pointer *pdp = &ppgtt->pdp; /* FIXME: 48b */ > struct sg_page_iter sg_iter; > > __sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0); > - gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter, start, cache_level); > + > + if (!USES_FULL_48BIT_PPGTT(vm->dev)) { > + gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start, > + cache_level); > + } else { > + struct i915_page_directory_pointer *pdp; > + uint64_t templ4, pml4e; > + uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT; > + > + gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) { > + gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter, > + start, cache_level); > + } > + } > } > > static void gen8_free_page_tables(struct drm_device *dev, >
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 7c024e98..7070d42 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -687,9 +687,9 @@ static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm, struct i915_hw_ppgtt *ppgtt = container_of(vm, struct i915_hw_ppgtt, base); gen8_pte_t *pt_vaddr; - unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK; - unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK; - unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK; + unsigned pdpe = gen8_pdpe_index(start); + unsigned pde = gen8_pde_index(start); + unsigned pte = gen8_pte_index(start); unsigned num_entries = length >> PAGE_SHIFT; unsigned last_pte, i; @@ -725,7 +725,8 @@ static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm, pte = 0; if (++pde == I915_PDES) { - pdpe++; + if (++pdpe == I915_PDPES_PER_PDP(vm->dev)) + break; pde = 0; } } @@ -738,12 +739,21 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm, { struct i915_hw_ppgtt *ppgtt = container_of(vm, struct i915_hw_ppgtt, base); - struct i915_page_directory_pointer *pdp = &ppgtt->pdp; /* FIXME: 48b */ - gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page), I915_CACHE_LLC, use_scratch); - gen8_ppgtt_clear_pte_range(vm, pdp, start, length, scratch_pte); + if (!USES_FULL_48BIT_PPGTT(vm->dev)) { + gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length, + scratch_pte); + } else { + uint64_t templ4, pml4e; + struct i915_page_directory_pointer *pdp; + + gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) { + gen8_ppgtt_clear_pte_range(vm, pdp, start, length, + scratch_pte); + } + } } static void @@ -756,9 +766,9 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm, struct i915_hw_ppgtt *ppgtt = container_of(vm, struct i915_hw_ppgtt, base); gen8_pte_t *pt_vaddr; - unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK; - unsigned pde = start >> 
GEN8_PDE_SHIFT & GEN8_PDE_MASK; - unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK; + unsigned pdpe = gen8_pdpe_index(start); + unsigned pde = gen8_pde_index(start); + unsigned pte = gen8_pte_index(start); pt_vaddr = NULL; @@ -776,7 +786,8 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm, kunmap_px(ppgtt, pt_vaddr); pt_vaddr = NULL; if (++pde == I915_PDES) { - pdpe++; + if (++pdpe == I915_PDPES_PER_PDP(vm->dev)) + break; pde = 0; } pte = 0; @@ -795,11 +806,23 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, { struct i915_hw_ppgtt *ppgtt = container_of(vm, struct i915_hw_ppgtt, base); - struct i915_page_directory_pointer *pdp = &ppgtt->pdp; /* FIXME: 48b */ struct sg_page_iter sg_iter; __sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0); - gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter, start, cache_level); + + if (!USES_FULL_48BIT_PPGTT(vm->dev)) { + gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start, + cache_level); + } else { + struct i915_page_directory_pointer *pdp; + uint64_t templ4, pml4e; + uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT; + + gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) { + gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter, + start, cache_level); + } + } } static void gen8_free_page_tables(struct drm_device *dev,
When 48b is enabled, gen8_ppgtt_insert_entries needs to read the Page Map Level 4 (PML4), before it selects which Page Directory Pointer (PDP) it will write to. Similarly, gen8_ppgtt_clear_range needs to get the correct PDP/PD range. This patch was inspired by Ben's "Depend exclusively on map and unmap_vma". v2: Rebase after s/page_tables/page_table/. v3: Remove unnecessary pdpe loop in gen8_ppgtt_clear_range_4lvl and use clamp_pdp in gen8_ppgtt_insert_entries (Akash). v4: Merge gen8_ppgtt_clear_range_4lvl into gen8_ppgtt_clear_range to maintain symmetry with gen8_ppgtt_insert_entries (Akash). v5: Do not mix pages and bytes in insert_entries (Akash). v6: Prevent overflow in sg_nents << PAGE_SHIFT, when inserting 4GB at once. v7: Rebase after Mika's ppgtt cleanup / scratch merge patch series. Use gen8_px_index functions, and remove unnecessary number of pages parameter in insert_pte_entries. v8: Change gen8_ppgtt_clear_pte_range to stop at PDP boundary, instead of adding an extra clamp function; remove unnecessary pdp_start/pdp_len variables (Akash). v9: pages->orig_nents instead of sg_nents(pages->sgl) to get the length (Akash). Cc: Akash Goel <akash.goel@intel.com> Signed-off-by: Michel Thierry <michel.thierry@intel.com> --- drivers/gpu/drm/i915/i915_gem_gtt.c | 49 +++++++++++++++++++++++++++---------- 1 file changed, 36 insertions(+), 13 deletions(-)