@@ -208,20 +208,6 @@ or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
}
}

-static void
-or1k_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction dir)
-{
- unsigned long cl;
- dma_addr_t addr = dma_handle;
- struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
-
- /* Invalidate the dcache for the requested range */
- for (cl = addr; cl < addr + size; cl += cpuinfo->dcache_block_size)
- mtspr(SPR_DCBIR, cl);
-}
-
static void
or1k_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size,
@@ -243,7 +229,6 @@ const struct dma_map_ops or1k_dma_map_ops = {
.unmap_page = or1k_unmap_page,
.map_sg = or1k_map_sg,
.unmap_sg = or1k_unmap_sg,
- .sync_single_for_cpu = or1k_sync_single_for_cpu,
.sync_single_for_device = or1k_sync_single_for_device,
};
EXPORT_SYMBOL(or1k_dma_map_ops);
openrisc does all the required cache maintenance at dma map time, and none at unmap time.  It thus has to implement sync_single_for_device to match the map case for buffer reuse, but there is no point in doing another invalidation in the sync_single_for_cpu case, which in terms of cache maintenance is equivalent to the unmap case.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 arch/openrisc/kernel/dma.c | 15 ---------------
 1 file changed, 15 deletions(-)
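
As an illustration of the ownership model described above, here is a minimal compilable sketch. It is not the kernel source: the sketch_* names are hypothetical, and cache_flush()/cache_invalidate() stand in for the per-cache-line mtspr(SPR_DCBFR, ...) / mtspr(SPR_DCBIR, ...) loops visible in the diff context.

#include <stddef.h>

enum dma_data_direction { DMA_TO_DEVICE, DMA_FROM_DEVICE };

/* Stand-ins for the per-line flush/invalidate loops in the real code. */
static void cache_flush(void *buf, size_t size) { (void)buf; (void)size; }
static void cache_invalidate(void *buf, size_t size) { (void)buf; (void)size; }

/* map time: do all the required maintenance up front */
static void sketch_map(void *buf, size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_TO_DEVICE)
		cache_flush(buf, size);      /* write CPU data back for the device */
	else
		cache_invalidate(buf, size); /* drop stale lines before DMA writes */
}

/* unmap time: nothing left to do, map time already covered it */
static void sketch_unmap(void *buf, size_t size, enum dma_data_direction dir)
{
	(void)buf; (void)size; (void)dir;
}

/* sync_for_device: the buffer is handed back to the device after CPU
 * use, so the map-time maintenance has to be repeated for reuse */
static void sketch_sync_for_device(void *buf, size_t size,
				   enum dma_data_direction dir)
{
	sketch_map(buf, size, dir);
}

/* sync_for_cpu: equivalent to unmap for cache purposes, hence with this
 * patch or1k_dma_map_ops simply leaves .sync_single_for_cpu unset (the
 * DMA core skips NULL callbacks) */

int main(void)
{
	char buf[64];

	/* device -> CPU: invalidate at map, nothing at unmap */
	sketch_map(buf, sizeof(buf), DMA_FROM_DEVICE);
	sketch_unmap(buf, sizeof(buf), DMA_FROM_DEVICE);

	/* CPU -> device with buffer reuse: repeat the map-side flush */
	sketch_map(buf, sizeof(buf), DMA_TO_DEVICE);
	sketch_sync_for_device(buf, sizeof(buf), DMA_TO_DEVICE);
	return 0;
}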