@@ -267,15 +267,6 @@ static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
 
 	irq_domain_remove(pp->msi_domain);
 	irq_domain_remove(pp->irq_domain);
-
-	if (pp->msi_data) {
-		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-		struct device *dev = pci->dev;
-
-		dma_unmap_page(dev, pp->msi_data, PAGE_SIZE, DMA_FROM_DEVICE);
-		if (pp->msi_page)
-			__free_page(pp->msi_page);
-	}
 }
 
 static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
@@ -336,6 +327,7 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct device *dev = pci->dev;
 	struct platform_device *pdev = to_platform_device(dev);
+	u64 *msi_vaddr;
 	int ret;
 	u32 ctrl, num_ctrls;
 
@@ -375,22 +367,16 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
 							 dw_chained_msi_isr, pp);
 	}
 
-	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 	if (ret)
 		dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
 
-	pp->msi_page = alloc_page(GFP_DMA32);
-	pp->msi_data = dma_map_page(dev, pp->msi_page, 0,
-				    PAGE_SIZE, DMA_FROM_DEVICE);
-	ret = dma_mapping_error(dev, pp->msi_data);
-	if (ret) {
-		dev_err(pci->dev, "Failed to map MSI data\n");
-		__free_page(pp->msi_page);
-		pp->msi_page = NULL;
-		pp->msi_data = 0;
+	msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
+					GFP_KERNEL);
+	if (!msi_vaddr) {
+		dev_err(dev, "Failed to alloc and map MSI data\n");
 		dw_pcie_free_msi(pp);
-
-		return ret;
+		return -ENOMEM;
 	}
 
 	return 0;
@@ -243,7 +243,6 @@ struct dw_pcie_rp {
 	struct irq_domain	*irq_domain;
 	struct irq_domain	*msi_domain;
 	dma_addr_t		msi_data;
-	struct page		*msi_page;
 	struct irq_chip		*msi_irq_chip;
 	u32			num_vectors;
 	u32			irq_mask[MAX_MSI_CTRLS];
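
The net effect of the change: the MSI target address now comes from one small, device-managed coherent allocation instead of a hand-rolled alloc_page() + dma_map_page() pair, which is why both the cleanup block in dw_pcie_free_msi() and the msi_page field can go away. Below is a minimal, self-contained sketch of that idiom; foo_msi_state and foo_setup_msi_target() are hypothetical names, not part of the driver, and the sketch only illustrates the dmam_alloc_coherent() pattern the patch adopts.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

/* Hypothetical state mirroring the msi_data field kept in struct dw_pcie_rp. */
struct foo_msi_state {
	dma_addr_t msi_data;		/* bus address endpoints write MSIs to */
};

/*
 * Sketch of the managed-allocation idiom the patch switches to: a single
 * small coherent buffer supplies the MSI target address, and devres releases
 * it automatically when the device is unbound, so no explicit unmap/free
 * path (like the removed code in dw_pcie_free_msi()) is needed.
 */
static int foo_setup_msi_target(struct device *dev, struct foo_msi_state *st)
{
	u64 *msi_vaddr;
	int ret;

	/* Prefer a target below 4 GiB; some endpoints only do 32-bit MSI. */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		dev_warn(dev, "32-bit DMA mask unavailable; 32-bit-only MSI endpoints may misbehave\n");

	/*
	 * A single u64 is enough: only the resulting bus address matters,
	 * the buffer contents are never read by the driver.
	 */
	msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &st->msi_data,
					GFP_KERNEL);
	if (!msi_vaddr)
		return -ENOMEM;

	/* st->msi_data is the address the controller is then told to match. */
	return 0;
}

Because the buffer is devm-managed, the error path in the patched dw_pcie_msi_host_init() only has to undo the IRQ domain setup, hence the bare dw_pcie_free_msi() call followed by return -ENOMEM.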