b/arch/arm/include/asm/dma-mapping.h
@@ -115,6 +115,11 @@ static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
___dma_page_dev_to_cpu(page, off, size, dir);
}
+static inline void __dma_sync(void)
+{
+ dsb();
+}
+
/*
* Return whether the given device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24-bits
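
The new __dma_sync() gives the DMA layer a single place to issue the barrier that the per-range cache routines below currently each perform themselves. For reference, this is roughly how dsb() expands on ARM, paraphrased from arch/arm/include/asm/system.h of this era (config guards simplified here, so treat it as a sketch rather than the exact header):

/* Paraphrase of the kernel's dsb() definition (guards simplified). */
#if __LINUX_ARM_ARCH__ >= 7
/* ARMv7 has a dedicated Data Synchronization Barrier instruction. */
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
#else
/* Pre-v7, the same barrier is the CP15 "drain write buffer" op,
 * i.e. the mcr p15, 0, rN, c7, c10, 4 being deleted below. */
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#endif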
@@ -378,6 +383,7 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
BUG_ON(!valid_dma_direction(dir));
addr = __dma_map_single(dev, cpu_addr, size, dir);
+ __dma_sync();
debug_dma_map_page(dev, virt_to_page(cpu_addr),
(unsigned long)cpu_addr & ~PAGE_MASK, size,
dir, addr, true);
return addr;
@@ -407,6 +413,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
BUG_ON(!valid_dma_direction(dir));
addr = __dma_map_page(dev, page, offset, size, dir);
+ __dma_sync();
debug_dma_map_page(dev, page, offset, size, dir, addr, false);
return addr;
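
Both map routines follow the same shape: per-range cache maintenance first, then exactly one barrier before the handle is returned. Condensed, with the debug hook elided (a paraphrase of the patched header, not a verbatim copy):

/* dma_map_single() as it reads once the hunk above is applied. */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	addr = __dma_map_single(dev, cpu_addr, size, dir);
	__dma_sync();	/* one dsb covers all the cache maintenance */
	return addr;
}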
@@ -431,6 +438,7 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
{
debug_dma_unmap_page(dev, handle, size, dir, true);
__dma_unmap_single(dev, handle, size, dir);
+ __dma_sync();
}
/**
@@ -452,6 +460,7 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
{
debug_dma_unmap_page(dev, handle, size, dir, false);
__dma_unmap_page(dev, handle, size, dir);
+ __dma_sync();
}
/**
@@ -498,6 +507,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
return;
__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
+ __dma_sync();
}
static inline void dma_sync_single_for_cpu(struct device *dev,
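
For drivers, the streaming API is unchanged; only the barrier count per call drops. A minimal, hypothetical fragment (example_start_tx, dev, buf and len are placeholders, not part of this patch) showing where the single dsb now fires:

#include <linux/dma-mapping.h>

static int example_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE); /* clean + 1 dsb */
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;
	/* ... hand 'handle' to the device, wait for the transfer ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE); /* cache op + 1 dsb */
	return 0;
}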
b/arch/arm/mm/cache-fa.S
@@ -179,8 +179,6 @@ fa_dma_inv_range:
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
- mov r0, #0
- mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
@@ -197,8 +195,6 @@ fa_dma_clean_range:
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
- mov r0, #0
- mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
@@ -212,8 +208,6 @@ ENTRY(fa_dma_flush_range)
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
- mov r0, #0
- mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
b/arch/arm/mm/cache-v4wb.S
@@ -194,7 +194,6 @@ v4wb_dma_inv_range:
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
- mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
@@ -211,7 +210,6 @@ v4wb_dma_clean_range:
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
- mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
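
Dropping the drain from every range routine pays off most on scatter/gather, where the old code drained the write buffer once per list entry. A sketch of the idea, under the assumption that dma_map_sg gets the same treatment (that hunk is not part of this excerpt, and error handling is elided):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	       enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
						s->length, dir);
	__dma_sync();	/* one barrier for all nents entries */
	return nents;
}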
b/arch/arm/mm/cache-v6.S
@@ -239,8 +239,6 @@ v6_dma_inv_range:
strlo r2, [r0] @ write for ownership
#endif
blo 1b
- mov r0, #0
- mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
@@ -262,8 +260,6 @@ v6_dma_clean_range:
add r0, r0, #D_CACHE_LINE_SIZE
cmp r0, r1
blo 1b
- mov r0, #0
- mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
@@ -290,8 +286,6 @@ ENTRY(v6_dma_flush_range)
strlob r2, [r0] @ write for ownership
#endif
blo 1b
- mov r0, #0
- mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
b/arch/arm/mm/cache-v7.S
@@ -257,7 +257,6 @@ v7_dma_inv_range:
add r0, r0, r2
cmp r0, r1
blo 1b
- dsb
mov pc, lr
ENDPROC(v7_dma_inv_range)
@@ -293,7 +291,6 @@ ENTRY(v7_dma_flush_range)
add r0, r0, r2
cmp r0, r1
blo 1b
- dsb
mov pc, lr
ENDPROC(v7_dma_flush_range)
b/arch/arm/mm/dma-mapping.c
@@ -97,6 +97,7 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
memset(ptr, 0, size);
dmac_flush_range(ptr, ptr + size);
outer_flush_range(__pa(ptr), __pa(ptr) + size);
+ __dma_sync();
return page;
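
The allocator hunk closes the remaining hole: a fresh buffer is zeroed and flushed, and the new barrier orders that maintenance before the page can be handed out. A hypothetical caller relying on that guarantee (example_alloc_ring, dev and ring_bytes are placeholders):

#include <linux/dma-mapping.h>

static int example_alloc_ring(struct device *dev, size_t ring_bytes)
{
	dma_addr_t ring_dma;
	void *ring = dma_alloc_coherent(dev, ring_bytes, &ring_dma, GFP_KERNEL);

	if (!ring)
		return -ENOMEM;
	/* ring is zero-filled and its flushes are ordered before we get here */
	dma_free_coherent(dev, ring_bytes, ring, ring_dma);
	return 0;
}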