@@ -2009,7 +2009,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
static void print_ppgtt(struct seq_file *m, struct i915_hw_ppgtt *ppgtt)
{
- seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
+ seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.pd_offset);
}
static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev, int verbose)
@@ -242,7 +242,7 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
int used_pd = ppgtt->num_pd_entries / I915_PDES_PER_PD;
for (i = used_pd - 1; i >= 0; i--) {
- dma_addr_t addr = ppgtt->pd_dma_addr[i];
+ dma_addr_t addr = ppgtt->pdp.pagedir[i].daddr;
ret = gen8_write_pdp(ring, i, addr);
if (ret)
return ret;
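Each PDP load now reads the directory's DMA address straight out of the embedded struct i915_pagedir rather than the old parallel pd_dma_addr[] array. As a rough sketch of the used_pd arithmetic, assuming I915_PDES_PER_PD is 512 (one 4KiB page of 8-byte PDEs) and each page table likewise maps 512 pages:

    /* 512 PDEs * 512 PTEs * 4 KiB = 1 GiB per page directory, so a  */
    /* full 4 GiB space has num_pd_entries == 2048 and used_pd == 4, */
    /* i.e. one gen8_write_pdp() call per populated PDP slot.        */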
@@ -367,7 +367,6 @@ static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
for (i = 0; i < ppgtt->num_pd_pages; i++) {
gen8_free_page_tables(&ppgtt->pdp.pagedir[i]);
gen8_free_page_directories(&ppgtt->pdp.pagedir[i]);
- kfree(ppgtt->gen8_pt_dma_addr[i]);
}
}
@@ -379,14 +378,14 @@ static void gen8_ppgtt_dma_unmap_pages(struct i915_hw_ppgtt *ppgtt)
for (i = 0; i < ppgtt->num_pd_pages; i++) {
/* TODO: In the future we'll support sparse mappings, so this
* will have to change. */
- if (!ppgtt->pd_dma_addr[i])
+ if (!ppgtt->pdp.pagedir[i].daddr)
continue;
- pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
+ pci_unmap_page(hwdev, ppgtt->pdp.pagedir[i].daddr, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
for (j = 0; j < I915_PDES_PER_PD; j++) {
- dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+ dma_addr_t addr = ppgtt->pdp.pagedir[i].page_tables[j].daddr;
if (addr)
pci_unmap_page(hwdev, addr, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
@@ -403,31 +402,17 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
gen8_ppgtt_free(ppgtt);
}
-static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
-{
- int i;
-
- for (i = 0; i < ppgtt->num_pd_pages; i++) {
- ppgtt->gen8_pt_dma_addr[i] = kcalloc(I915_PDES_PER_PD,
- sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!ppgtt->gen8_pt_dma_addr[i])
- return -ENOMEM;
- }
-
- return 0;
-}
-
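With daddr now a member of struct i915_pagetab there is nothing left for gen8_ppgtt_allocate_dma() to allocate, so the helper goes away entirely. Side by side, the lookup the rest of the patch converts to (names taken from this series):

    dma_addr_t addr;
    addr = ppgtt->gen8_pt_dma_addr[i][j];              /* before */
    addr = ppgtt->pdp.pagedir[i].page_tables[j].daddr; /* after  */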
static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
{
int i, j;
for (i = 0; i < ppgtt->num_pd_pages; i++) {
+ struct i915_pagedir *pd = &ppgtt->pdp.pagedir[i];
for (j = 0; j < I915_PDES_PER_PD; j++) {
- struct i915_pagetab *pt = &ppgtt->pdp.pagedir[i].page_tables[j];
+ struct i915_pagetab *pt = &pd->page_tables[j];
pt->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!pt->page)
goto unwind_out;
}
}
@@ -487,9 +473,7 @@ static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
ppgtt->num_pd_entries = max_pdp * I915_PDES_PER_PD;
- ret = gen8_ppgtt_allocate_dma(ppgtt);
- if (!ret)
- return ret;
+ return 0;
/* TODO: Check this for all cases */
err_out:
@@ -511,7 +495,7 @@ static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
if (ret)
return ret;
- ppgtt->pd_dma_addr[pdpe] = pd_addr;
+ ppgtt->pdp.pagedir[pdpe].daddr = pd_addr;
return 0;
}
@@ -521,17 +505,18 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
const int pde)
{
dma_addr_t pt_addr;
- struct page *p;
+ struct i915_pagedir *pd = &ppgtt->pdp.pagedir[pdpe];
+ struct i915_pagetab *pt = &pd->page_tables[pde];
+ struct page *p = pt->page;
int ret;
- p = ppgtt->pdp.pagedir[pdpe].page_tables[pde].page;
pt_addr = pci_map_page(ppgtt->base.dev->pdev,
p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
if (ret)
return ret;
- ppgtt->gen8_pt_dma_addr[pdpe][pde] = pt_addr;
+ pt->daddr = pt_addr;
return 0;
}
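Together with gen8_ppgtt_allocate_page_tables() above, a page table's backing page and its DMA mapping now live in one object. A condensed sketch of that lifecycle, assuming a struct pci_dev *pdev and eliding the real unwind paths:

    struct i915_pagetab *pt = &ppgtt->pdp.pagedir[pdpe].page_tables[pde];

    pt->page  = alloc_page(GFP_KERNEL | __GFP_ZERO);        /* allocate */
    pt->daddr = pci_map_page(pdev, pt->page, 0, PAGE_SIZE,  /* map */
                             PCI_DMA_BIDIRECTIONAL);
    if (pci_dma_mapping_error(pdev, pt->daddr))
        __free_page(pt->page);                              /* unwind */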
@@ -587,7 +572,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
gen8_ppgtt_pde_t *pd_vaddr;
pd_vaddr = kmap_atomic(ppgtt->pdp.pagedir[i].page);
for (j = 0; j < I915_PDES_PER_PD; j++) {
- dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+ dma_addr_t addr = ppgtt->pdp.pagedir[i].page_tables[j].daddr;
pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
I915_CACHE_LLC);
}
@@ -628,14 +613,15 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
- ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
+ ppgtt->pd.pd_offset / sizeof(gen6_gtt_pte_t);
seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm,
- ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries);
+ ppgtt->pd.pd_offset,
+ ppgtt->pd.pd_offset + ppgtt->num_pd_entries);
for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
u32 expected;
gen6_gtt_pte_t *pt_vaddr;
- dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde];
+ dma_addr_t pt_addr = ppgtt->pd.page_tables[pde].daddr;
pd_entry = readl(pd_addr + pde);
expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
@@ -678,8 +664,8 @@ static void gen6_map_single(struct i915_hw_ppgtt *ppgtt,
{
struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
uint32_t pd_entry;
- gen6_gtt_pte_t __iomem *pd_addr =
- (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm + ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
+ gen6_gtt_pte_t __iomem *pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm;
+ pd_addr += ppgtt->pd.pd_offset / sizeof(gen6_gtt_pte_t);
pd_entry = GEN6_PDE_ADDR_ENCODE(daddr);
pd_entry |= GEN6_PDE_VALID;
@@ -694,18 +680,18 @@ static void gen6_map_page_tables(struct i915_hw_ppgtt *ppgtt)
struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
int i;
- WARN_ON(ppgtt->pd_offset & 0x3f);
+ WARN_ON(ppgtt->pd.pd_offset & 0x3f);
for (i = 0; i < ppgtt->num_pd_entries; i++)
- gen6_map_single(ppgtt, i, ppgtt->pt_dma_addr[i]);
+ gen6_map_single(ppgtt, i, ppgtt->pd.page_tables[i].daddr);
readl(dev_priv->gtt.gsm);
}
static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
- BUG_ON(ppgtt->pd_offset & 0x3f);
+ BUG_ON(ppgtt->pd.pd_offset & 0x3f);
- return (ppgtt->pd_offset / 64) << 16;
+ return (ppgtt->pd.pd_offset / 64) << 16;
}
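pd.pd_offset is a byte offset into the global GTT, which is why both this function and gen6_map_page_tables() assert 64-byte alignment (& 0x3f). The encoding divides by 64 (cacheline units) and shifts the result into bits 31:16; a worked example for a directory at GGTT byte offset 0x40000:

    /* (0x40000 / 64) << 16 == 0x1000 << 16 == 0x10000000 */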
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
@@ -905,19 +891,16 @@ static void gen6_ppgtt_dma_unmap_pages(struct i915_hw_ppgtt *ppgtt)
{
int i;
- if (ppgtt->pt_dma_addr) {
- for (i = 0; i < ppgtt->num_pd_entries; i++)
- pci_unmap_page(ppgtt->base.dev->pdev,
- ppgtt->pt_dma_addr[i],
- 4096, PCI_DMA_BIDIRECTIONAL);
- }
+ for (i = 0; i < ppgtt->num_pd_entries; i++)
+ pci_unmap_page(ppgtt->base.dev->pdev,
+ ppgtt->pd.page_tables[i].daddr,
+ 4096, PCI_DMA_BIDIRECTIONAL);
}
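The dropped NULL check guarded against pt_dma_addr never having been kcalloc'ed on an early error path; with each daddr embedded in its i915_pagetab, the storage exists whenever the page tables themselves do, so the loop can run unconditionally.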
static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
{
int i;
- kfree(ppgtt->pt_dma_addr);
for (i = 0; i < ppgtt->num_pd_entries; i++)
__free_page(ppgtt->pd.page_tables[i].page);
kfree(ppgtt->pd.page_tables);
@@ -1010,14 +993,6 @@ static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
return ret;
}
- ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!ppgtt->pt_dma_addr) {
- drm_mm_remove_node(&ppgtt->node);
- gen6_ppgtt_free(ppgtt);
- return -ENOMEM;
- }
-
return 0;
}
@@ -1039,7 +1014,7 @@ static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
return -EIO;
}
- ppgtt->pt_dma_addr[i] = pt_addr;
+ ppgtt->pd.page_tables[i].daddr = pt_addr;
}
return 0;
@@ -1078,7 +1053,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.total = ppgtt->num_pd_entries * GEN6_PTES_PER_PT * PAGE_SIZE;
ppgtt->debug_dump = gen6_dump_ppgtt;
- ppgtt->pd_offset =
+ ppgtt->pd.pd_offset =
ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
@@ -1087,7 +1062,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
gen6_map_page_tables(ppgtt);
DRM_DEBUG("Adding PPGTT at offset %x\n",
- ppgtt->pd_offset << 10);
+ ppgtt->pd.pd_offset << 10);
return 0;
}
@@ -257,10 +257,16 @@ struct i915_gtt {
struct i915_pagetab {
struct page *page;
+ dma_addr_t daddr;
};
struct i915_pagedir {
struct page *page; /* NULL for GEN6-GEN7 */
+ union {
+ uint32_t pd_offset;
+ dma_addr_t daddr;
+ };
+
struct i915_pagetab *page_tables;
};
@@ -276,14 +282,6 @@ struct i915_hw_ppgtt {
unsigned num_pd_entries;
unsigned num_pd_pages; /* gen8+ */
union {
- uint32_t pd_offset;
- dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPES];
- };
- union {
- dma_addr_t *pt_dma_addr;
- dma_addr_t *gen8_pt_dma_addr[GEN8_LEGACY_PDPES];
- };
- union {
struct i915_pagedirpo pdp;
struct i915_pagedir pd;
};
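The rationale for the reshuffled unions: on GEN6/7 the page directory lives inside the global GTT (its page pointer stays NULL, per the comment above), so a GGTT byte offset identifies it, while on GEN8 each directory is a real page with its own DMA address. The one union left in i915_hw_ppgtt then simply selects the per-generation top-level structure:

    ppgtt->pd.pd_offset          /* GEN6/7: byte offset into the GGTT */
    ppgtt->pdp.pagedir[i].daddr  /* GEN8: DMA address of the PD page  */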
@@ -1719,14 +1719,14 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
- reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
- reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
- reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
- reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
- reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
- reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
- reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
- reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
+ reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pdp.pagedir[3].daddr);
+ reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pdp.pagedir[3].daddr);
+ reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pdp.pagedir[2].daddr);
+ reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pdp.pagedir[2].daddr);
+ reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pdp.pagedir[1].daddr);
+ reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pdp.pagedir[1].daddr);
+ reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pdp.pagedir[0].daddr);
+ reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pdp.pagedir[0].daddr);
if (ring->id == RCS) {
reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
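Each PDP slot in the logical ring context is a 64-bit DMA address split across an upper/lower dword pair, hence the paired writes through the kernel's upper_32_bits()/lower_32_bits() helpers. With a hypothetical daddr of 0x123456000:

    /* upper_32_bits(0x123456000ULL) == 0x00000001  ->  _UDW */
    /* lower_32_bits(0x123456000ULL) == 0x23456000  ->  _LDW */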