@@ -1023,7 +1023,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
 	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
 		gen8_pde_t *const page_directory = kmap_px(pd);
 		struct i915_page_table *pt;
-		uint64_t pd_len = gen8_clamp_pd(start, length);
+		uint64_t pd_len = length;
 		uint64_t pd_start = start;
 		uint32_t pde;
 
@@ -438,17 +438,6 @@ static inline uint32_t gen6_pde_index(uint32_t addr)
 	     temp = min(temp, length), \
 	     start += temp, length -= temp)
 
-/* Clamp length to the next page_directory boundary */
-static inline uint64_t gen8_clamp_pd(uint64_t start, uint64_t length)
-{
-	uint64_t next_pd = ALIGN(start + 1, 1 << GEN8_PDPE_SHIFT);
-
-	if (next_pd > (start + length))
-		return length;
-
-	return next_pd - start;
-}
-
 static inline uint32_t gen8_pte_index(uint64_t address)
 {
 	return i915_pte_index(address, GEN8_PDE_SHIFT);
gen8_clamp_pd clamps to the next page directory boundary, but the macro
gen8_for_each_pde already has a check to stop at the page directory
boundary. Furthermore, i915_pte_count also restricts to the next page
table boundary.

v2: Rebase after Mika's ppgtt cleanup / scratch merge patch series.

Suggested-by: Akash Goel <akash.goel@intel.com>
Signed-off-by: Michel Thierry <michel.thierry@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c |  2 +-
 drivers/gpu/drm/i915/i915_gem_gtt.h | 11 -----------
 2 files changed, 1 insertion(+), 12 deletions(-)
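
For reviewers, here is a minimal stand-alone sketch of why the outer clamp is
redundant: the gen8_for_each_*() style loops already clamp each step to the
next boundary with ALIGN()/min(), so passing in the full remaining length is
harmless. This is not the kernel code; BOUNDARY_SHIFT, walk(), ALIGN and MIN
are illustrative stand-ins.

/*
 * Sketch only: per-step clamping inside the loop makes a pre-clamp of
 * the total length redundant. BOUNDARY_SHIFT is a made-up stand-in for
 * GEN8_PDPE_SHIFT / GEN8_PDE_SHIFT.
 */
#include <stdint.h>
#include <stdio.h>

#define BOUNDARY_SHIFT	21	/* hypothetical: 2MB span per entry */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define MIN(a, b)	((a) < (b) ? (a) : (b))

static void walk(uint64_t start, uint64_t length)
{
	uint64_t temp;

	/* Each iteration advances at most to the next boundary, the same
	 * pattern the gen8_for_each_*() macros use. */
	for (; length > 0; start += temp, length -= temp) {
		temp = ALIGN(start + 1, 1ULL << BOUNDARY_SHIFT) - start;
		temp = MIN(temp, length);
		printf("entry %llu: start=%llu len=%llu\n",
		       (unsigned long long)(start >> BOUNDARY_SHIFT),
		       (unsigned long long)start,
		       (unsigned long long)temp);
	}
}

int main(void)
{
	/* A range crossing several boundaries is still split per entry,
	 * with or without clamping 'length' up front. */
	walk(3ULL << 20, 6ULL << 20);
	return 0;
}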