@@ -550,12 +550,44 @@ static void __pdp_fini(struct i915_page_directory_pointer *pdp)
pdp->page_directory = NULL;
}
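+/* Allocate a page directory pointer for 48b PPGTT: the tracking bitmap and
+ * directory array (__pdp_init) plus the backing page (setup_px). Returns
+ * an ERR_PTR on failure.
+ */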
+static struct i915_page_directory_pointer *alloc_pdp(struct drm_device *dev)
+{
+ struct i915_page_directory_pointer *pdp;
+ int ret;
+
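+ /* pdps are only dynamically allocated in 48b mode; legacy 32b mode
+ * uses the single pdp embedded in the ppgtt, hence the WARN.
+ */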
+ WARN_ON(!USES_FULL_48BIT_PPGTT(dev));
+
+ pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
+ if (!pdp)
+ return ERR_PTR(-ENOMEM);
+
+ ret = __pdp_init(dev, pdp);
+ if (ret)
+ goto fail_bitmap;
+
+ ret = setup_px(dev, pdp);
+ if (ret)
+ goto fail_page_m;
+
+ return pdp;
+
+fail_page_m:
+ __pdp_fini(pdp);
+fail_bitmap:
+ kfree(pdp);
+
+ return ERR_PTR(ret);
+}
+
static void free_pdp(struct drm_device *dev,
struct i915_page_directory_pointer *pdp)
{
__pdp_fini(pdp);
- if (USES_FULL_48BIT_PPGTT(dev))
+ if (USES_FULL_48BIT_PPGTT(dev)) {
+ cleanup_px(dev, pdp);
kfree(pdp);
+ }
}
#define SCRATCH_PAGE_MAGIC 0xffff00ffffff00ffULL
@@ -852,28 +884,46 @@ static void gen8_free_page_tables(struct drm_device *dev,
}
}
-static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
+static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
+ struct i915_page_directory_pointer *pdp)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
int i;
- if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
- for_each_set_bit(i, ppgtt->pdp.used_pdpes,
- I915_PDPES_PER_PDP(ppgtt->base.dev)) {
- if (WARN_ON(!ppgtt->pdp.page_directory[i]))
- continue;
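+ /* Free the page tables under each allocated page directory, then
+ * the page directories themselves, and finally the pdp.
+ */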
+ for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev)) {
+ if (WARN_ON(!pdp->page_directory[i]))
+ continue;
- gen8_free_page_tables(ppgtt->base.dev,
- ppgtt->pdp.page_directory[i]);
- free_pd(ppgtt->base.dev,
- ppgtt->pdp.page_directory[i]);
- }
- free_pdp(ppgtt->base.dev, &ppgtt->pdp);
- } else {
- WARN_ON(1); /* to be implemented later */
+ gen8_free_page_tables(dev, pdp->page_directory[i]);
+ free_pd(dev, pdp->page_directory[i]);
}
+ free_pdp(dev, pdp);
+}
+
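+/* 48b teardown: free each pdp referenced by the pml4, then release the
+ * pml4 page itself.
+ */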
+static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
+{
+ int i;
+
+ for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
+ if (WARN_ON(!ppgtt->pml4.pdps[i]))
+ continue;
+
+ gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, ppgtt->pml4.pdps[i]);
+ }
+
+ cleanup_px(ppgtt->base.dev, &ppgtt->pml4);
+}
+
+static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
+{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+
+ if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
+ gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, &ppgtt->pdp);
+ else
+ gen8_ppgtt_cleanup_4lvl(ppgtt);
+
cleanup_scratch(vm);
}
@@ -1155,8 +1205,62 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
struct i915_pml4 *pml4,
uint64_t start,
uint64_t length)
{
- WARN_ON(1); /* to be implemented later */
+ DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_page_directory_pointer *pdp;
+ const uint64_t orig_start = start;
+ const uint64_t orig_length = length;
+ uint64_t temp, pml4e;
+ int ret = 0;
+
+ /* Do the pml4 allocations first, so we don't need to track the newly
+ * allocated tables below the pdp. */
+ bitmap_zero(new_pdps, GEN8_PML4ES_PER_PML4);
+
+ /* The page directory and page table allocations are done in the shared
+ * 3 and 4 level code. Just allocate the pdps.
+ */
+ gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
+ if (!pdp) {
+ WARN_ON(test_bit(pml4e, pml4->used_pml4es));
+ pdp = alloc_pdp(vm->dev);
+ if (IS_ERR(pdp)) {
+ ret = PTR_ERR(pdp);
+ goto err_out;
+ }
+
+ pml4->pdps[pml4e] = pdp;
+ __set_bit(pml4e, new_pdps);
+ trace_i915_page_directory_pointer_entry_alloc(&ppgtt->base, pml4e,
+ pml4e << GEN8_PML4E_SHIFT,
+ GEN8_PML4E_SHIFT);
+ }
+ }
+
+ WARN(bitmap_weight(new_pdps, GEN8_PML4ES_PER_PML4) > 2,
+ "The allocation has spanned more than 512GB. "
+ "It is highly likely this is incorrect.");
+
+ start = orig_start;
+ length = orig_length;
+
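+ /* Second pass: allocate the page directories and page tables within
+ * each pdp through the shared 3lvl path.
+ */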
+ gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
+ WARN_ON(!pdp);
+
+ ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
+ if (ret)
+ goto err_out;
+ }
+
+ bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
+ GEN8_PML4ES_PER_PML4);
+
return 0;
+
+err_out:
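+ /* Only tear down pdps allocated by this call (tracked in new_pdps);
+ * pre-existing pdps are left intact.
+ */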
+ for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
+ gen8_ppgtt_cleanup_3lvl(vm->dev, pml4->pdps[pml4e]);
+
+ return ret;
}
static int gen8_alloc_va_range(struct i915_address_space *vm,
@@ -1165,10 +1269,10 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- if (!USES_FULL_48BIT_PPGTT(vm->dev))
- return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
- else
+ if (USES_FULL_48BIT_PPGTT(vm->dev))
return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
+ else
+ return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
}
/* With some architectures and 32bit legacy mode, hardware pre-loads the
@@ -1243,13 +1347,21 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
if (ret)
return ret;
- if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
- ret = __pdp_init(ppgtt->base.dev, &ppgtt->pdp);
+ if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
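+ /* 48b mode only needs the top-level pml4 page here; the pdps
+ * below it are allocated on demand in gen8_alloc_va_range_4lvl.
+ */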
+ ret = setup_px(ppgtt->base.dev, &ppgtt->pml4);
+ if (ret)
+ goto clear_scratch;
+ ppgtt->base.total = 1ULL << 48;
+ } else {
+ ret = __pdp_init(ppgtt->base.dev, &ppgtt->pdp);
if (ret)
goto clear_scratch;
ppgtt->base.total = 1ULL << 32;
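+ /* The legacy 32b pdp covers the range pml4 entry 0 would, hence
+ * GEN8_PML4E_SHIFT in the trace below.
+ */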
+ trace_i915_page_directory_pointer_entry_alloc(&ppgtt->base,
+ 0, 0,
+ GEN8_PML4E_SHIFT);
if (hw_wont_flush_pdp_tlbs(ppgtt)) {
/* Avoid the tlb flush bug by preallocating
* whole top level pdp structure so it stays
@@ -1261,9 +1373,6 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
if (ret)
goto clear_pdp;
}
- } else {
- ppgtt->base.total = 1ULL << 48;
- return -EPERM; /* Not yet implemented */
}
return 0;
@@ -95,6 +95,7 @@ typedef uint64_t gen8_pde_t;
*/
#define GEN8_PML4ES_PER_PML4 512
#define GEN8_PML4E_SHIFT 39
+#define GEN8_PML4E_MASK (GEN8_PML4ES_PER_PML4 - 1)
#define GEN8_PDPE_SHIFT 30
/* NB: GEN8_PDPE_MASK is untrue for 32b platforms, but it has no impact on 32b page
* tables */
@@ -458,6 +459,14 @@ static inline uint32_t gen6_pde_index(uint32_t addr)
temp = min(temp, length), \
start += temp, length -= temp)
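+/* Iterate over the pml4 entries covering [start, start + length). On each
+ * pass pdp holds the current pdp slot (NULL if not yet allocated) and temp
+ * holds the length consumed by that entry.
+ */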
+#define gen8_for_each_pml4e(pdp, pml4, start, length, temp, iter) \
+ for (iter = gen8_pml4e_index(start); \
+ length > 0 && iter < GEN8_PML4ES_PER_PML4 && \
+ (pdp = (pml4)->pdps[iter], true); \
+ iter++, \
+ temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT) - start, \
+ temp = min(temp, length), \
+ start += temp, length -= temp)
+
#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \
gen8_for_each_pdpe_e(pd, pdp, start, length, temp, iter, I915_PDPES_PER_PDP(dev))
@@ -478,8 +487,7 @@ static inline uint32_t gen8_pdpe_index(uint64_t address)
static inline uint32_t gen8_pml4e_index(uint64_t address)
{
- WARN_ON(1); /* For 64B */
- return 0;
+ return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
}
static inline size_t gen8_pte_count(uint64_t address, uint64_t length)