@@ -210,45 +210,33 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
return pte;
}
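+/* Unmap the single 4k page backing any px (page table/dir/etc.) struct. */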
-#define dma_unmap_pt_single(pt, dev) do { \
- pci_unmap_page((dev)->pdev, (pt)->daddr, 4096, PCI_DMA_BIDIRECTIONAL); \
-} while (0);
+#define i915_dma_unmap_single(px, dev) do { \
+ pci_unmap_page((dev)->pdev, (px)->daddr, 4096, PCI_DMA_BIDIRECTIONAL); \
+} while (0)
/**
- * dma_map_pt_single() - Create a dma mapping for a page table
- * @pt: Page table to get a DMA map for
+ * i915_dma_map_px_single() - Create a DMA mapping for a page table/dir/etc.
+ * @px: Page table/dir/etc. to get a DMA map for
* @dev: drm device
*
* Page table allocations are unified across all gens. They always require a
- * single 4k allocation, as well as a DMA mapping.
+ * single 4k allocation, as well as a DMA mapping. If we keep the structs
+ * symmetric here, the simple macro covers us for every page table type.
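+ * Concretely, every px struct here is assumed to embed a struct page *page
+ * and a dma_addr_t daddr member; those are the only fields the map/unmap
+ * macros touch.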
*
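+ * Note: px->daddr is assigned as a side effect of evaluating the
+ * pci_dma_mapping_error() argument, so when the macro returns non-zero,
+ * daddr holds the error cookie and must not be used. Callers follow the
+ * usual pattern:
+ *
+ *	ret = i915_dma_map_px_single(pt, dev);
+ *	if (ret)
+ *		return ret;
+ *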
* Return: 0 if success.
*/
-static int dma_map_pt_single(struct i915_pagetab *pt, struct drm_device *dev)
-{
- struct page *page;
- dma_addr_t pt_addr;
- int ret;
-
- page = pt->page;
- pt_addr = pci_map_page(dev->pdev, page, 0, 4096,
- PCI_DMA_BIDIRECTIONAL);
-
- ret = pci_dma_mapping_error(dev->pdev, pt_addr);
- if (ret)
- return ret;
-
- pt->daddr = pt_addr;
-
- return 0;
-}
+#define i915_dma_map_px_single(px, dev) \
+ pci_dma_mapping_error((dev)->pdev, \
+ (px)->daddr = pci_map_page((dev)->pdev, \
+ (px)->page, 0, 4096, \
+ PCI_DMA_BIDIRECTIONAL))
static void free_pt_single(struct i915_pagetab *pt, struct drm_device *dev)
{
if (WARN_ON(!pt->page))
return;
- dma_unmap_pt_single(pt, dev);
+ i915_dma_unmap_single(pt, dev);
__free_page(pt->page);
kfree(pt);
}
@@ -268,7 +256,7 @@ static struct i915_pagetab *alloc_pt_single(struct drm_device *dev)
return ERR_PTR(-ENOMEM);
}
- ret = dma_map_pt_single(pt, dev);
+ ret = i915_dma_map_px_single(pt, dev);
if (ret) {
__free_page(pt->page);
kfree(pt);
@@ -510,7 +498,7 @@ static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
static void gen8_ppgtt_dma_unmap_pages(struct i915_hw_ppgtt *ppgtt)
{
- struct pci_dev *hwdev = ppgtt->base.dev->pdev;
+ struct drm_device *dev = ppgtt->base.dev;
int i, j;
for (i = 0; i < ppgtt->num_pd_pages; i++) {
@@ -519,16 +507,14 @@ static void gen8_ppgtt_dma_unmap_pages(struct i915_hw_ppgtt *ppgtt)
if (!ppgtt->pdp.pagedir[i]->daddr)
continue;
- pci_unmap_page(hwdev, ppgtt->pdp.pagedir[i]->daddr, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ i915_dma_unmap_single(ppgtt->pdp.pagedir[i], dev);
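+ /* Now unmap every page table beneath this page directory. */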
for (j = 0; j < I915_PDES_PER_PD; j++) {
struct i915_pagedir *pd = ppgtt->pdp.pagedir[i];
struct i915_pagetab *pt = pd->page_tables[j];
dma_addr_t addr = pt->daddr;
if (addr)
- pci_unmap_page(hwdev, addr, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ i915_dma_unmap_single(pt, dev);
}
}
}
@@ -611,19 +597,13 @@ err_out:
static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
const int pdpe)
{
- dma_addr_t pd_addr;
int ret;
- pd_addr = pci_map_page(ppgtt->base.dev->pdev,
- ppgtt->pdp.pagedir[pdpe]->page, 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-
- ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
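+ /* The macro stores the mapping in pagedir[pdpe]->daddr as a side effect. */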
+ ret = i915_dma_map_px_single(ppgtt->pdp.pagedir[pdpe],
+ ppgtt->base.dev);
if (ret)
return ret;
- ppgtt->pdp.pagedir[pdpe]->daddr = pd_addr;
-
return 0;
}