@@ -488,29 +488,50 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
kunmap_atomic(pt_vaddr);
}
-static void gen8_free_page_tables(struct i915_pagedir *pd, struct drm_device *dev)
+static void gen8_free_page_tables(struct i915_pagedir *pd,
+ uint64_t start, uint64_t length,
+ struct drm_device *dev)
{
int i;
if (!pd->page)
return;
- for (i = 0; i < I915_PDES_PER_PD; i++) {
+ for (i = gen8_pde_index(start); length && i < I915_PDES_PER_PD; i++) {
+ uint64_t pt_len = gen8_bound_pt(start, length);
+
+ start += pt_len;
+ length -= pt_len;
+
+ if (!pd->page_tables[i])
+ continue;
+
free_pt_single(pd->page_tables[i], dev);
pd->page_tables[i] = NULL;
}
}
-static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
+static void gen8_teardown_va_range(struct i915_hw_ppgtt *ppgtt,
+ uint64_t start, uint64_t length)
{
- int i;
+ struct drm_device *dev = ppgtt->base.dev;
+ struct i915_pagedir *pd;
+ struct i915_pagetab *pt;
+ uint64_t temp, temp2;
+ uint32_t pdpe, pde;
+
+ gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
+ uint64_t pd_start = start;
+ uint64_t pd_len = gen8_bound_pd(start, length);
+
+ if (!pd)
+ continue;
+
+ gen8_for_each_pde(pt, pd, pd_start, pd_len, temp2, pde) {
+ if (!pt)
+ continue;
+
+ free_pt_single(pt, dev);
+ pd->page_tables[pde] = NULL;
+ }
- for (i = 0; i < ppgtt->num_pd_pages; i++) {
- gen8_free_page_tables(ppgtt->pdp.pagedir[i], ppgtt->base.dev);
- free_pd_single(ppgtt->pdp.pagedir[i], ppgtt->base.dev);
+ free_pd_single(pd, dev);
+ ppgtt->pdp.pagedir[pdpe] = NULL;
}
}
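+
+/* For example, gen8_teardown_va_range(ppgtt, 0, 1ULL << GEN8_PDPE_SHIFT)
+ * would release the page tables and page directory backing the first page
+ * directory's worth of VA (1GB on GEN8); gen8_ppgtt_free() below simply
+ * tears down the whole address space.
+ */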
+static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
+{
+ gen8_teardown_va_range(ppgtt, ppgtt->base.start, ppgtt->base.total);
+}
+
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt =
@@ -537,41 +558,75 @@ static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
unwind_out:
while (i--)
- gen8_free_page_tables(ppgtt->pdp.pagedir[i], ppgtt->base.dev);
+ /* Each page directory covers 1 << GEN8_PDPE_SHIFT bytes of VA. */
+ gen8_free_page_tables(ppgtt->pdp.pagedir[i],
+ (uint64_t)i << GEN8_PDPE_SHIFT,
+ 1ULL << GEN8_PDPE_SHIFT,
+ ppgtt->base.dev);
return -ENOMEM;
}
static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
- const int max_pdp)
+ uint64_t start, uint64_t length)
{
- int i;
+ struct i915_pagedir *unused;
+ uint64_t temp;
+ uint32_t pdpe;
- for (i = 0; i < max_pdp; i++) {
- ppgtt->pdp.pagedir[i] = alloc_pd_single(ppgtt->base.dev);
- if (IS_ERR(ppgtt->pdp.pagedir[i]))
- goto unwind_out;
+ gen8_for_each_pdpe(unused, &ppgtt->pdp, start, length, temp, pdpe) {
+ struct i915_pagedir *pd;
+
+ BUG_ON(unused);
+ pd = alloc_pd_single(ppgtt->base.dev);
+ if (IS_ERR(pd))
+ goto pd_fail;
+
+ ppgtt->pdp.pagedir[pdpe] = pd;
+ ppgtt->num_pd_pages++;
}
- ppgtt->num_pd_pages = max_pdp;
BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
return 0;
-unwind_out:
- while (i--)
- free_pd_single(ppgtt->pdp.pagedir[i],
- ppgtt->base.dev);
+pd_fail:
+ while (pdpe--)
+ free_pd_single(ppgtt->pdp.pagedir[pdpe], ppgtt->base.dev);
return -ENOMEM;
}
+static int gen8_alloc_va_range(struct i915_hw_ppgtt *ppgtt,
+ uint64_t start, uint64_t length)
+{
+ struct i915_pagedir *pd;
+ uint64_t temp;
+ uint32_t pdpe;
+
+ gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
+ uint64_t pd_start = start;
+ uint64_t pd_len = gen8_bound_pd(start, length);
+ int ret;
+
+ /* Allocate every page table touched by [pd_start, pd_start +
+ * pd_len). alloc_pt_range() is assumed to take a [first, last)
+ * pair of pde indices.
+ */
+ ret = alloc_pt_range(pd,
+ gen8_pde_index(pd_start),
+ gen8_pde_index(pd_start + pd_len - 1) + 1,
+ ppgtt->base.dev);
+ if (ret) {
+ gen8_free_page_tables(pd, pd_start, pd_len,
+ ppgtt->base.dev);
+ return ret;
+ }
+ }
+
+ return 0;
+}
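+
+/*
+ * Usage sketch: nothing in this patch calls gen8_alloc_va_range() yet. A
+ * follow-up change would presumably invoke it from the PPGTT bind path
+ * before any PTEs are written, roughly (hypothetical caller, using the
+ * VMA's drm_mm_node placement):
+ *
+ *	ret = gen8_alloc_va_range(ppgtt, vma->node.start, vma->node.size);
+ *	if (ret)
+ *		return ret;
+ *
+ * after which gen8_ppgtt_insert_entries() can fill the new page tables.
+ */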
+
static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
- const int max_pdp)
+ uint64_t start, uint64_t length)
{
int ret;
- ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
+ ret = gen8_ppgtt_allocate_page_directories(ppgtt, start, length);
if (ret)
return ret;
@@ -579,7 +634,7 @@ static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
if (ret)
goto err_out;
- ppgtt->num_pd_entries = max_pdp * I915_PDES_PER_PD;
+ ppgtt->num_pd_entries = length >> GEN8_PDE_SHIFT;
return 0;
@@ -605,11 +660,8 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
const int min_pt_pages = I915_PDES_PER_PD * max_pdp;
int i, j, ret;
- if (size % (1<<30))
- DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
-
/* 1. Do all our allocations for page directories and page tables. */
- ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
+ ret = gen8_ppgtt_alloc(ppgtt, 0, size);
if (ret)
return ret;
@@ -192,6 +192,43 @@ static inline uint32_t gen8_pml4e_index(uint64_t address)
BUG();
}
+/* Clamp length so that [start, start + return value) never crosses a page
+ * table boundary (1 << GEN8_PDE_SHIFT bytes). If the range already fits
+ * within the page table containing start, length is returned unchanged.
+ */
+static inline uint64_t gen8_bound_pt(uint64_t start, uint64_t length)
+{
+ uint64_t next_pt = ALIGN(start + 1, 1 << GEN8_PDE_SHIFT);
+ if (next_pt > (start + length))
+ return length;
+
+ return next_pt - start;
+}
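+
+/* Example, assuming GEN8_PDE_SHIFT is 21 (each page table maps 2MB):
+ * gen8_bound_pt(0x1ff000, 0x3000) returns 0x1000, the distance to the 2MB
+ * boundary at 0x200000, while gen8_bound_pt(0x200000, 0x3000) returns the
+ * full 0x3000 because that range fits within a single page table.
+ */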
+
+/* As gen8_bound_pt(), but clamps to page directory boundaries
+ * (1 << GEN8_PDPE_SHIFT bytes).
+ */
+static inline uint64_t gen8_bound_pd(uint64_t start, uint64_t length)
+{
+ uint64_t next_pd = ALIGN(start + 1, 1 << GEN8_PDPE_SHIFT);
+ if (next_pd > (start + length))
+ return length;
+
+ return next_pd - start;
+}
+
+#define gen8_for_each_pde(pt, pd, start, length, temp, iter) \
+ for (iter = gen8_pde_index(start); \
+ length > 0 && iter < I915_PDES_PER_PD && \
+ (pt = (pd)->page_tables[iter], 1); \
+ iter++, \
+ temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT) - start, \
+ temp = min(temp, length), \
+ start += temp, length -= temp)
+
+#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \
+ for (iter = gen8_pdpe_index(start); \
+ length > 0 && iter < GEN8_LEGACY_PDPS && \
+ (pd = (pdp)->pagedir[iter], 1); \
+ iter++, \
+ temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT) - start, \
+ temp = min(temp, length), \
+ start += temp, length -= temp)
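+
+/*
+ * The two walkers are meant to nest, as gen8_teardown_va_range() does:
+ *
+ *	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
+ *		uint64_t pd_start = start;
+ *		uint64_t pd_len = gen8_bound_pd(start, length);
+ *
+ *		gen8_for_each_pde(pt, pd, pd_start, pd_len, temp2, pde) {
+ *			... operate on one page table's worth of VA ...
+ *		}
+ *	}
+ *
+ * Each iteration advances start/length past the VA covered by the current
+ * entry, so callers must work on copies if the original values are still
+ * needed afterwards.
+ */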
+
enum i915_cache_level;
/**
* A VMA represents a GEM BO that is bound into an address space. Therefore, a