diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -101,5 +101,12 @@ void kunmap_coherent(void *kvaddr);

 void cpu_cache_init(void);

+static inline void *sh_cacheop_vaddr(void *vaddr)
+{
+        if (__in_29bit_mode())
+                vaddr = (void *)CAC_ADDR((unsigned long)vaddr);
+        return vaddr;
+}
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHEFLUSH_H */
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -69,10 +69,7 @@ void dma_generic_free_coherent(struct device *dev, size_t size,
 void sh_sync_dma_for_device(void *vaddr, size_t size,
                             enum dma_data_direction direction)
 {
-        void *addr;
-
-        addr = __in_29bit_mode() ?
-               (void *)CAC_ADDR((unsigned long)vaddr) : vaddr;
+        void *addr = sh_cacheop_vaddr(vaddr);

         switch (direction) {
         case DMA_FROM_DEVICE:        /* invalidate only */
@@ -88,7 +85,6 @@ void sh_sync_dma_for_device(void *vaddr, size_t size,
                 BUG();
         }
 }
-EXPORT_SYMBOL(sh_sync_dma_for_device);

 static int __init memchunk_setup(char *str)
 {
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
--- a/drivers/sh/maple/maple.c
+++ b/drivers/sh/maple/maple.c
@@ -300,8 +300,8 @@ static void maple_send(void)
         mutex_unlock(&maple_wlist_lock);
         if (maple_packets > 0) {
                 for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
-                        sh_sync_dma_for_device(maple_sendbuf + i * PAGE_SIZE,
-                                               PAGE_SIZE, DMA_BIDIRECTIONAL);
+                        __flush_purge_region(maple_sendbuf + i * PAGE_SIZE,
+                                             PAGE_SIZE);
         }

 finish:
@@ -642,7 +642,8 @@ static void maple_dma_handler(struct work_struct *work)
         list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
                 mdev = mq->dev;
                 recvbuf = mq->recvbuf->buf;
-                sh_sync_dma_for_device(recvbuf, 0x400, DMA_FROM_DEVICE);
+                __flush_invalidate_region(sh_cacheop_vaddr(recvbuf),
+                                          0x400);
                 code = recvbuf[0];
                 kfree(mq->sendbuf);
                 list_del_init(&mq->list);
And use it in the maple bus code to avoid a dma API dependency.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 arch/sh/include/asm/cacheflush.h | 7 +++++++
 arch/sh/mm/consistent.c          | 6 +-----
 drivers/sh/maple/maple.c         | 7 ++++---
 3 files changed, 12 insertions(+), 8 deletions(-)
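
(Not part of the patch.) As a quick illustration of the intended usage, here is a
minimal sketch of how a driver could pair the new sh_cacheop_vaddr() helper with the
sh32 cache primitives used above. The function names and buffer parameters are
hypothetical, and unlike the maple_send() hunk the sketch applies the helper on the
CPU-to-device path as well, purely for symmetry:

#include <linux/types.h>
#include <asm/cacheflush.h>     /* sh_cacheop_vaddr(), __flush_*_region() */

/*
 * CPU -> device: write back and discard dirty cache lines before the
 * device reads the buffer (analogous to the maple_send() change).
 */
static void example_sync_for_device(void *sendbuf, size_t len)
{
        __flush_purge_region(sh_cacheop_vaddr(sendbuf), len);
}

/*
 * Device -> CPU: invalidate stale cache lines before the CPU reads data
 * the device has written (analogous to the maple_dma_handler() change).
 */
static void example_sync_for_cpu(void *recvbuf, size_t len)
{
        __flush_invalidate_region(sh_cacheop_vaddr(recvbuf), len);
}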