@@ -275,6 +275,102 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
return pte;
}
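+/*
+ * Page table / page directory allocation helpers.
+ *
+ * Page tables (i915_pagetab) and page directories (i915_pagedir) are now
+ * individually allocated structures that wrap their backing struct page,
+ * so the GEN6 and GEN8 paths can share the same alloc/free primitives.
+ */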
+static void free_pt_single(struct i915_pagetab *pt)
+{
+ if (WARN_ON(!pt->page))
+ return;
+ __free_page(pt->page);
+ kfree(pt);
+}
+
+static struct i915_pagetab *alloc_pt_single(void)
+{
+ struct i915_pagetab *pt;
+
+ pt = kzalloc(sizeof(*pt), GFP_KERNEL);
+ if (!pt)
+ return ERR_PTR(-ENOMEM);
+
+ pt->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pt->page) {
+ kfree(pt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return pt;
+}
+
+/**
+ * alloc_pt_range() - Allocate multiple page tables
+ * @pd: The page directory which will have at least @count entries
+ * available to point to the allocated page tables.
+ * @pde: First page directory entry for which we are allocating.
+ * @count: Number of page tables to allocate.
+ *
+ * Allocates multiple page table pages and sets the appropriate entries in the
+ * page table structure within the page directory. The function cleans up after
+ * itself on any failure.
+ *
+ * Return: 0 if allocation succeeded.
+ */
+static int alloc_pt_range(struct i915_pagedir *pd, uint16_t pde, size_t count)
+{
+ int i, ret;
+
+ /* 512 is the max number of page tables per pagedir on any platform.
+ * TODO: turn this into a WARN once the patch series is done.
+ */
+ BUG_ON(pde + count > GEN6_PPGTT_PD_ENTRIES);
+
+ for (i = pde; i < pde + count; i++) {
+ struct i915_pagetab *pt = alloc_pt_single();
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
+ goto err_out;
+ }
+ WARN(pd->page_tables[i],
+ "Leaking page directory entry %d (%pa)\n",
+ i, pd->page_tables[i]);
+ pd->page_tables[i] = pt;
+ }
+
+ return 0;
+
+err_out:
+ /* Only unwind the page tables this call allocated (from @pde up). */
+ while (i-- > pde)
+ free_pt_single(pd->page_tables[i]);
+ return ret;
+}
+
+static void free_pd_single(struct i915_pagedir *pd)
+{
+ /* The GEN6 page directory is embedded in the ppgtt and has no
+ * backing page of its own, so pd->page is NULL there and this
+ * is a no-op.
+ */
+ if (!pd->page)
+ return;
+
+ __free_page(pd->page);
+ kfree(pd);
+}
+
+static struct i915_pagedir *alloc_pd_single(void)
+{
+ struct i915_pagedir *pd;
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ pd->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pd->page) {
+ kfree(pd);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return pd;
+}
+
/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
uint64_t val)
@@ -307,7 +403,7 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
for (i = used_pd - 1; i >= 0; i--) {
- dma_addr_t addr = ppgtt->pdp.pagedir[i].daddr;
+ dma_addr_t addr = ppgtt->pdp.pagedir[i]->daddr;
ret = gen8_write_pdp(ring, i, addr);
if (ret)
return ret;
@@ -334,8 +430,9 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
I915_CACHE_LLC, use_scratch);
while (num_entries) {
- struct i915_pagedir *pd = &ppgtt->pdp.pagedir[pdpe];
- struct page *page_table = pd->page_tables[pde].page;
+ struct i915_pagedir *pd = ppgtt->pdp.pagedir[pdpe];
+ struct i915_pagetab *pt = pd->page_tables[pde];
+ struct page *page_table = pt->page;
last_pte = pte + num_entries;
if (last_pte > GEN8_PTES_PER_PAGE)
@@ -380,8 +477,9 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
break;
if (pt_vaddr == NULL) {
- struct i915_pagedir *pd = &ppgtt->pdp.pagedir[pdpe];
- struct page *page_table = pd->page_tables[pde].page;
+ struct i915_pagedir *pd = ppgtt->pdp.pagedir[pdpe];
+ struct i915_pagetab *pt = pd->page_tables[pde];
+ struct page *page_table = pt->page;
pt_vaddr = kmap_atomic(page_table);
}
@@ -412,18 +510,13 @@ static void gen8_free_page_tables(struct i915_pagedir *pd)
{
int i;
- if (pd->page_tables == NULL)
+ if (!pd->page)
return;
- for (i = 0; i < GEN8_PDES_PER_PAGE; i++)
- if (pd->page_tables[i].page)
- __free_page(pd->page_tables[i].page);
-}
-
-static void gen8_free_page_directories(struct i915_pagedir *pd)
-{
- kfree(pd->page_tables);
- __free_page(pd->page);
+ for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
+ free_pt_single(pd->page_tables[i]);
+ pd->page_tables[i] = NULL;
+ }
}
static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
@@ -431,8 +524,8 @@ static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
int i;
for (i = 0; i < ppgtt->num_pd_pages; i++) {
- gen8_free_page_tables(&ppgtt->pdp.pagedir[i]);
- gen8_free_page_directories(&ppgtt->pdp.pagedir[i]);
+ gen8_free_page_tables(ppgtt->pdp.pagedir[i]);
+ free_pd_single(ppgtt->pdp.pagedir[i]);
}
}
@@ -444,14 +537,16 @@ static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
for (i = 0; i < ppgtt->num_pd_pages; i++) {
/* TODO: In the future we'll support sparse mappings, so this
* will have to change. */
- if (!ppgtt->pdp.pagedir[i].daddr)
+ if (!ppgtt->pdp.pagedir[i]->daddr)
continue;
- pci_unmap_page(hwdev, ppgtt->pdp.pagedir[i].daddr, PAGE_SIZE,
+ pci_unmap_page(hwdev, ppgtt->pdp.pagedir[i]->daddr, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
- dma_addr_t addr = ppgtt->pdp.pagedir[i].page_tables[j].daddr;
+ struct i915_pagedir *pd = ppgtt->pdp.pagedir[i];
+ struct i915_pagetab *pt = pd->page_tables[j];
+ dma_addr_t addr = pt->daddr;
if (addr)
pci_unmap_page(hwdev, addr, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
@@ -470,25 +565,20 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
{
- int i, j;
+ int i, ret;
for (i = 0; i < ppgtt->num_pd_pages; i++) {
- struct i915_pagedir *pd = &ppgtt->pdp.pagedir[i];
- for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
- struct i915_pagetab *pt = &pd->page_tables[j];
-
- pt->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!pt->page)
- goto unwind_out;
-
- }
+ ret = alloc_pt_range(ppgtt->pdp.pagedir[i],
+ 0, GEN8_PDES_PER_PAGE);
+ if (ret)
+ goto unwind_out;
}
return 0;
unwind_out:
while (i--)
- gen8_free_page_tables(&ppgtt->pdp.pagedir[i]);
+ gen8_free_page_tables(ppgtt->pdp.pagedir[i]);
return -ENOMEM;
}
@@ -499,17 +589,9 @@ static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
int i;
for (i = 0; i < max_pdp; i++) {
- struct i915_pagetab *pt;
-
- pt = kcalloc(GEN8_PDES_PER_PAGE, sizeof(*pt), GFP_KERNEL);
- if (!pt)
+ ppgtt->pdp.pagedir[i] = alloc_pd_single();
+ if (IS_ERR(ppgtt->pdp.pagedir[i]))
goto unwind_out;
-
- ppgtt->pdp.pagedir[i].page = alloc_page(GFP_KERNEL);
- if (!ppgtt->pdp.pagedir[i].page)
- goto unwind_out;
-
- ppgtt->pdp.pagedir[i].page_tables = pt;
}
ppgtt->num_pd_pages = max_pdp;
@@ -518,10 +600,8 @@ static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
return 0;
unwind_out:
- while (i--) {
- kfree(ppgtt->pdp.pagedir[i].page_tables);
- __free_page(ppgtt->pdp.pagedir[i].page);
- }
+ while (i--)
+ free_pd_single(ppgtt->pdp.pagedir[i]);
return -ENOMEM;
}
@@ -556,14 +636,14 @@ static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
int ret;
pd_addr = pci_map_page(ppgtt->base.dev->pdev,
- ppgtt->pdp.pagedir[pd].page, 0,
+ ppgtt->pdp.pagedir[pd]->page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
if (ret)
return ret;
- ppgtt->pdp.pagedir[pd].daddr = pd_addr;
+ ppgtt->pdp.pagedir[pd]->daddr = pd_addr;
return 0;
}
@@ -573,8 +653,8 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
const int pt)
{
dma_addr_t pt_addr;
- struct i915_pagedir *pdir = &ppgtt->pdp.pagedir[pd];
- struct i915_pagetab *ptab = &pdir->page_tables[pt];
+ struct i915_pagedir *pdir = ppgtt->pdp.pagedir[pd];
+ struct i915_pagetab *ptab = pdir->page_tables[pt];
struct page *p = ptab->page;
int ret;
@@ -637,10 +717,12 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
* will never need to touch the PDEs again.
*/
for (i = 0; i < max_pdp; i++) {
+ struct i915_pagedir *pd = ppgtt->pdp.pagedir[i];
gen8_ppgtt_pde_t *pd_vaddr;
- pd_vaddr = kmap_atomic(ppgtt->pdp.pagedir[i].page);
+ pd_vaddr = kmap_atomic(pd->page);
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
- dma_addr_t addr = ppgtt->pdp.pagedir[i].page_tables[j].daddr;
+ struct i915_pagetab *pt = pd->page_tables[j];
+ dma_addr_t addr = pt->daddr;
pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
I915_CACHE_LLC);
}
@@ -689,7 +771,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
u32 expected;
gen6_gtt_pte_t *pt_vaddr;
- dma_addr_t pt_addr = ppgtt->pd.page_tables[pde].daddr;
+ dma_addr_t pt_addr = ppgtt->pd.page_tables[pde]->daddr;
pd_entry = readl(pd_addr + pde);
expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
@@ -700,7 +782,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
expected);
seq_printf(m, "\tPDE: %x\n", pd_entry);
- pt_vaddr = kmap_atomic(ppgtt->pd.page_tables[pde].page);
+ pt_vaddr = kmap_atomic(ppgtt->pd.page_tables[pde]->page);
for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte+=4) {
unsigned long va =
(pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) +
@@ -739,7 +821,7 @@ static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
- pt_addr = ppgtt->pd.page_tables[i].daddr;
+ pt_addr = ppgtt->pd.page_tables[i]->daddr;
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
pd_entry |= GEN6_PDE_VALID;
@@ -905,7 +987,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
if (last_pte > I915_PPGTT_PT_ENTRIES)
last_pte = I915_PPGTT_PT_ENTRIES;
- pt_vaddr = kmap_atomic(ppgtt->pd.page_tables[act_pt].page);
+ pt_vaddr = kmap_atomic(ppgtt->pd.page_tables[act_pt]->page);
for (i = first_pte; i < last_pte; i++)
pt_vaddr[i] = scratch_pte;
@@ -934,7 +1016,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
pt_vaddr = NULL;
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
if (pt_vaddr == NULL)
- pt_vaddr = kmap_atomic(ppgtt->pd.page_tables[act_pt].page);
+ pt_vaddr = kmap_atomic(ppgtt->pd.page_tables[act_pt]->page);
pt_vaddr[act_pte] =
vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
@@ -957,7 +1039,7 @@ static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
for (i = 0; i < ppgtt->num_pd_entries; i++)
pci_unmap_page(ppgtt->base.dev->pdev,
- ppgtt->pd.page_tables[i].daddr,
+ ppgtt->pd.page_tables[i]->daddr,
4096, PCI_DMA_BIDIRECTIONAL);
}
@@ -966,8 +1048,9 @@ static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
int i;
for (i = 0; i < ppgtt->num_pd_entries; i++)
- __free_page(ppgtt->pd.page_tables[i].page);
- kfree(ppgtt->pd.page_tables);
+ free_pt_single(ppgtt->pd.page_tables[i]);
+
+ free_pd_single(&ppgtt->pd);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
@@ -1022,27 +1105,6 @@ alloc:
return 0;
}
-static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
-{
- struct i915_pagetab *pt;
- int i;
-
- pt = kcalloc(ppgtt->num_pd_entries, sizeof(*pt), GFP_KERNEL);
- if (!pt)
- return -ENOMEM;
-
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- pt[i].page = alloc_page(GFP_KERNEL);
- if (!pt->page) {
- gen6_ppgtt_free(ppgtt);
- return -ENOMEM;
- }
- }
-
- ppgtt->pd.page_tables = pt;
- return 0;
-}
-
static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
{
int ret;
@@ -1051,7 +1113,7 @@ static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
if (ret)
return ret;
- ret = gen6_ppgtt_allocate_page_tables(ppgtt);
+ ret = alloc_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries);
if (ret) {
drm_mm_remove_node(&ppgtt->node);
return ret;
@@ -1069,7 +1131,7 @@ static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
struct page *page;
dma_addr_t pt_addr;
- page = ppgtt->pd.page_tables[i].page;
+ page = ppgtt->pd.page_tables[i]->page;
pt_addr = pci_map_page(dev->pdev, page, 0, 4096,
PCI_DMA_BIDIRECTIONAL);
@@ -1078,7 +1140,7 @@ static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
return -EIO;
}
- ppgtt->pd.page_tables[i].daddr = pt_addr;
+ ppgtt->pd.page_tables[i]->daddr = pt_addr;
}
return 0;
@@ -277,12 +277,12 @@ struct i915_pagedir {
dma_addr_t daddr;
};
- struct i915_pagetab *page_tables;
+ struct i915_pagetab *page_tables[GEN6_PPGTT_PD_ENTRIES]; /* PDEs */
};
struct i915_pagedirpo {
/* struct page *page; */
- struct i915_pagedir pagedir[GEN8_LEGACY_PDPES];
+ struct i915_pagedir *pagedir[GEN8_LEGACY_PDPES];
};
struct i915_hw_ppgtt {
@@ -1731,14 +1731,14 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
- reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pdp.pagedir[3].daddr);
- reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pdp.pagedir[3].daddr);
- reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pdp.pagedir[2].daddr);
- reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pdp.pagedir[2].daddr);
- reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pdp.pagedir[1].daddr);
- reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pdp.pagedir[1].daddr);
- reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pdp.pagedir[0].daddr);
- reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pdp.pagedir[0].daddr);
+ reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pdp.pagedir[3]->daddr);
+ reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pdp.pagedir[3]->daddr);
+ reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pdp.pagedir[2]->daddr);
+ reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pdp.pagedir[2]->daddr);
+ reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pdp.pagedir[1]->daddr);
+ reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pdp.pagedir[1]->daddr);
+ reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pdp.pagedir[0]->daddr);
+ reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pdp.pagedir[0]->daddr);
if (ring->id == RCS) {
reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;