@@ -104,13 +104,6 @@ static inline void flush_icache_range(unsigned long start, unsigned long end)
}
#define flush_icache_range flush_icache_range

-/*
- * Cache maintenance functions used by the DMA API. No to be used directly.
- */
-extern void __dma_map_area(const void *, size_t, int);
-extern void __dma_unmap_area(const void *, size_t, int);
-extern void __dma_flush_area(const void *, size_t);
-
/*
* Copy user data from/to a page which is mapped into a different
* processes address space. Really, we want to allow our "user
@@ -194,44 +194,3 @@ SYM_FUNC_START(__pi_dcache_clean_pop)
ret
SYM_FUNC_END(__pi_dcache_clean_pop)
SYM_FUNC_ALIAS(dcache_clean_pop, __pi_dcache_clean_pop)
-
-/*
- * __dma_flush_area(start, size)
- *
- * clean & invalidate D / U line
- *
- * - start - virtual start address of region
- * - size - size in question
- */
-SYM_FUNC_START(__pi___dma_flush_area)
- add x1, x0, x1
- dcache_by_line_op civac, sy, x0, x1, x2, x3
- ret
-SYM_FUNC_END(__pi___dma_flush_area)
-SYM_FUNC_ALIAS(__dma_flush_area, __pi___dma_flush_area)
-
-/*
- * __dma_map_area(start, size, dir)
- * - start - kernel virtual start address
- * - size - size of region
- * - dir - DMA direction
- */
-SYM_FUNC_START(__pi___dma_map_area)
- add x1, x0, x1
- b __pi_dcache_clean_poc
-SYM_FUNC_END(__pi___dma_map_area)
-SYM_FUNC_ALIAS(__dma_map_area, __pi___dma_map_area)
-
-/*
- * __dma_unmap_area(start, size, dir)
- * - start - kernel virtual start address
- * - size - size of region
- * - dir - DMA direction
- */
-SYM_FUNC_START(__pi___dma_unmap_area)
- add x1, x0, x1
- cmp w2, #DMA_TO_DEVICE
- b.ne __pi_dcache_inval_poc
- ret
-SYM_FUNC_END(__pi___dma_unmap_area)
-SYM_FUNC_ALIAS(__dma_unmap_area, __pi___dma_unmap_area)
@@ -14,20 +14,29 @@
#include <asm/cacheflush.h>

void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir)
+ enum dma_data_direction dir)
{
- __dma_map_area(phys_to_virt(paddr), size, dir);
+ unsigned long start = (unsigned long)phys_to_virt(paddr);
+
+ dcache_clean_poc(start, start + size);
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir)
+ enum dma_data_direction dir)
{
- __dma_unmap_area(phys_to_virt(paddr), size, dir);
+ unsigned long start = (unsigned long)phys_to_virt(paddr);
+
+ if (dir == DMA_TO_DEVICE)
+ return;
+
+ dcache_inval_poc(start, start + size);
}

void arch_dma_prep_coherent(struct page *page, size_t size)
{
- __dma_flush_area(page_address(page), size);
+ unsigned long start = (unsigned long)page_address(page);
+
+ dcache_clean_inval_poc(start, start + size);
}

#ifdef CONFIG_IOMMU_DMA
Remove the __dma_{flush,map,unmap}_area assembly wrappers and call the
appropriate cache maintenance functions directly from the DMA mapping
callbacks.

Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/cacheflush.h |  7 -----
 arch/arm64/mm/cache.S               | 41 -----------------------------
 arch/arm64/mm/dma-mapping.c         | 19 +++++++++----
 3 files changed, 14 insertions(+), 53 deletions(-)
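
Not part of the patch, purely a reviewer-oriented sketch: the hypothetical
driver fragment below shows how the streaming DMA API reaches the hooks
touched above on a cache-incoherent device. dma_map_single() leads dma-direct
to arch_sync_dma_for_device(), which after this change calls dcache_clean_poc()
directly, and dma_unmap_single() with DMA_FROM_DEVICE leads to
arch_sync_dma_for_cpu() and dcache_inval_poc(). The function and variable
names in the example are made up for illustration.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustration only: receive 'len' bytes from a device into 'buf'. */
static int example_dma_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	/*
	 * Map for DMA: on a non-coherent arm64 device this ends up in
	 * arch_sync_dma_for_device() -> dcache_clean_poc().
	 */
	dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... program the device with 'dma' and wait for completion ... */

	/*
	 * Unmap: for DMA_FROM_DEVICE this ends up in
	 * arch_sync_dma_for_cpu() -> dcache_inval_poc(), so the CPU
	 * observes the data written by the device.
	 */
	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);

	return 0;
}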