@@ -73,9 +73,11 @@
/*
* dcimvac: Invalidate data cache line by MVA to PoC
*/
-.macro dcimvac, rt, tmp
- v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC
+.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
+.macro dcimvac\c, rt, tmp
+ v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c
.endm
+.endr
/*
* dccmvau: Clean data cache line by MVA to PoU
@@ -369,14 +371,16 @@ v7m_dma_inv_range:
tst r0, r3
bic r0, r0, r3
dccimvacne r0, r3
+ addne r0, r0, r2
subne r3, r2, #1 @ restore r3, corrupted by v7m's dccimvac
tst r1, r3
bic r1, r1, r3
dccimvacne r1, r3
-1:
- dcimvac r0, r3
- add r0, r0, r2
cmp r0, r1
+1:
+ dcimvaclo r0, r3
+ addlo r0, r0, r2
+ cmplo r0, r1
blo 1b
dsb st
ret lr
Chris has discovered and reported that v7_dma_inv_range() may corrupt
memory if the address range is not aligned to the cache line size.

Since the whole of cache-v7m.S was lifted from cache-v7.S, the same
observation applies to v7m_dma_inv_range(). So the fix just mirrors
what has been done for v7, adapted to the specifics of M-class.

Cc: Chris Cole <chris@sageembedded.com>
Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
---
Changelog
v1 -> v2
   - keep "cmp r0, r1" (per Chris)

 arch/arm/mm/cache-v7m.S | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)
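For reference, below is a minimal C sketch (not the kernel code) of the
corrected invalidate-range logic the patch implements in assembly. The
line size, clean_inv_line() and inv_line() helpers are hypothetical
stand-ins for dccimvac/dcimvac; it only illustrates why partial lines
at either end must be cleaned+invalidated and why the start pointer is
advanced past the first partial line (the "addne r0, r0, r2" fix).

#include <stdint.h>

#define CACHE_LINE  32u  /* assumed line size, for illustration only */

void clean_inv_line(uintptr_t mva);  /* clean+invalidate one line (dccimvac) */
void inv_line(uintptr_t mva);        /* invalidate one line (dcimvac) */

static void dma_inv_range_sketch(uintptr_t start, uintptr_t end)
{
	uintptr_t mask = CACHE_LINE - 1;

	if (start & mask) {
		/* Partial line at the start: clean+invalidate so dirty
		 * data outside [start, end) is not lost, then step past
		 * it so the loop below never plain-invalidates it. */
		start &= ~mask;
		clean_inv_line(start);
		start += CACHE_LINE;
	}

	if (end & mask) {
		/* Partial line at the end: same reasoning. */
		end &= ~mask;
		clean_inv_line(end);
	}

	/* Only whole lines strictly inside the range may be invalidated
	 * outright; the loop must not run when start >= end, which is
	 * why the "cmp r0, r1" is hoisted before the loop body. */
	while (start < end) {
		inv_line(start);
		start += CACHE_LINE;
	}
}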