b/arch/arm/include/asm/cacheflush.h
@@ -84,6 +84,15 @@
* - kaddr - page address
* - size - region size
*
+ * iommu_flush_area(start, size)
+ *
+ * Perform the CPU-specific cache operations required to ensure
+ * that the IOMMU page table mappings covering the specified block
+ * of memory are visible to the IOMMU. This API is intended for
+ * IOMMU page table memory only; do not use it for general data.
+ * - start - virtual start address
+ * - size - region size
+ *
* DMA Cache Coherency
* ===================
*
@@ -108,6 +117,7 @@ struct cpu_cache_fns {
void (*dma_unmap_area)(const void *, size_t, int);
void (*dma_flush_range)(const void *, const void *);
+ void (*iommu_flush_area)(const void *, size_t);
};
/*
@@ -135,6 +145,12 @@ extern struct cpu_cache_fns cpu_cache;
#define dmac_unmap_area cpu_cache.dma_unmap_area
#define dmac_flush_range cpu_cache.dma_flush_range
+/* This API supports non-coherent IOMMUs. Its purpose is to ensure
+ * that data held in the CPU cache is made visible to the MMU of the
+ * slave processor. Do not use this for general data.
+ */
+#define iommu_flush_area (cpu_cache.iommu_flush_area)
+
#else
extern void __cpuc_flush_icache_all(void);
@@ -155,6 +171,11 @@ extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);
+/* This API supports non-coherent IOMMUs. Its purpose is to ensure
+ * that data held in the CPU cache is made visible to the MMU of the
+ * slave processor. Do not use this for general data.
+ */
+extern void iommu_flush_area(const void *, size_t);
#endif
/*
b/arch/arm/include/asm/glue-cache.h
@@ -141,6 +141,7 @@
 #define dmac_map_area			__glue(_CACHE,_dma_map_area)
 #define dmac_unmap_area			__glue(_CACHE,_dma_unmap_area)
 #define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
+#define iommu_flush_area		__glue(_CACHE,_iommu_flush_area)
 #endif
#endif
@@ -217,6 +217,22 @@ ENTRY(fa_dma_flush_range)
mov pc, lr
/*
+ * iommu_flush_area(start, size)
+ * - start - virtual start address
+ * - size - size of region
+ */
+ENTRY(fa_iommu_flush_area)
+	add	r1, r1, r0			@ r1 = end address (start + size)
+	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a D-cache line
+1:	mcr	p15, 0, r0, c7, c14, 1	@ clean & invalidate D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b				@ loop over every line in the area
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
+	mov	pc, lr				@ NOTE(review): no ENDPROC(), unlike v4wb/v4wt variants
+
+/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
@@ -20,7 +20,6 @@
ENTRY(v3_flush_icache_all)
mov pc, lr
ENDPROC(v3_flush_icache_all)
-
/*
* flush_user_cache_all()
*
@@ -107,6 +106,19 @@ ENTRY(v3_dma_flush_range)
mov pc, lr
/*
+ * iommu_flush_area(start, size)
+ *
+ * Clean and invalidate the specified virtual address area.
+ *
+ * - start - virtual start address
+ * - size - size of region
+ */
+ENTRY(v3_iommu_flush_area)
+	mov	r0, #0				@ start/size are ignored: no range ops here
+	mcr	p15, 0, r0, c7, c0, 0		@ flush ID cache
+	mov	pc, lr
+
+/*
* dma_unmap_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
@@ -119,6 +119,21 @@ ENTRY(v4_dma_flush_range)
mov pc, lr
/*
+ * iommu_flush_area(start, size)
+ *
+ * Clean and invalidate the specified virtual address area.
+ *
+ * - start - virtual start address
+ * - size - size of region
+ */
+ENTRY(v4_iommu_flush_area)
+#ifdef CONFIG_CPU_CP15
+	mov	r0, #0				@ start/size are ignored: no range ops here
+	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
+#endif
+	mov	pc, lr				@ without CP15 there is nothing to do
+
+/*
* dma_unmap_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
@@ -228,6 +228,28 @@ v4wb_dma_clean_range:
.set v4wb_dma_flush_range, v4wb_coherent_kern_range
/*
+ * iommu_flush_area(start, size)
+ *
+ * Clean and invalidate the specified virtual address area.
+ *
+ * - start - virtual start address
+ * - size - size of region
+ */
+ENTRY(v4wb_iommu_flush_area)
+	add	r1, r1, r0			@ r1 = end address (start + size)
+	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a D-cache line
+1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b				@ loop over every line in the area
+	mov	ip, #0
+	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mov	pc, lr
+ENDPROC(v4wb_iommu_flush_area)
+
+/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
@@ -174,6 +174,24 @@ v4wt_dma_inv_range:
.equ v4wt_dma_flush_range, v4wt_dma_inv_range
/*
+ * iommu_flush_area(start, size)
+ *
+ * Clean and invalidate the specified virtual address area.
+ *
+ * - start - virtual start address
+ * - size - size of region
+ */
+ENTRY(v4wt_iommu_flush_area)
+	add	r1, r1, r0			@ r1 = end address (start + size)
+	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a D-cache line
+1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry (write-through: no clean needed)
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b				@ loop over every line in the area
+	mov	pc, lr
+ENDPROC(v4wt_iommu_flush_area)
+
+/*
* dma_unmap_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
@@ -326,6 +326,27 @@ ENTRY(v6_dma_unmap_area)
mov pc, lr
ENDPROC(v6_dma_unmap_area)
+/*
+ * v6_iommu_flush_area(start, size)
+ * - start - virtual start address
+ * - size - size of region
+ */
+ENTRY(v6_iommu_flush_area)
+	add	r1, r1, r0			@ r1 = end address (start + size)
+	bic	r0, r0, #D_CACHE_LINE_SIZE - 1	@ align start down to a D-cache line
+1:
+#ifdef HARVARD_CACHE
+	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
+#else
+	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate line (unified cache)
+#endif
+	add	r0, r0, #D_CACHE_LINE_SIZE
+	cmp	r0, r1
+	blo	1b				@ loop over every line in the area
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
+	mov	pc, lr
+ENDPROC(v6_iommu_flush_area)
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
@@ -320,6 +320,28 @@ ENTRY(v7_dma_flush_range)
dsb
mov pc, lr
ENDPROC(v7_dma_flush_range)
+/*
+ * v7_iommu_flush_area(start, size)
+ * - start - virtual start address
+ * - size - size of region
+ */
+ENTRY(v7_iommu_flush_area)
+	dcache_line_size r2, r3			@ r2 = D-cache line size for this CPU
+	add	r1, r0, r1			@ r1 = end address (start + size)
+	sub	r3, r2, #1			@ r3 = line-size mask
+	bic	r0, r0, r3			@ align start down to a cache line
+#ifdef CONFIG_ARM_ERRATA_764369
+	ALT_SMP(W(dsb))				@ erratum 764369: barrier before maintenance on SMP
+	ALT_UP(W(nop))
+#endif
+1:
+	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line
+	add	r0, r0, r2
+	cmp	r0, r1
+	blo	1b				@ loop over every line in the area
+	dsb					@ complete maintenance before return
+	mov	pc, lr
+ENDPROC(v7_iommu_flush_area)
/*
* dma_map_area(start, size, dir)
@@ -345,6 +345,29 @@ ENTRY(arm1020_dma_flush_range)
mov pc, lr
/*
+ * iommu_flush_area(start, size)
+ *
+ * Clean and invalidate the specified virtual address area.
+ *
+ * - start - virtual start address
+ * - size - size of region
+ */
+ENTRY(arm1020_iommu_flush_area)
+	add	r1, r1, r0			@ r1 = end address (start + size)
+	mov	ip, #0
+#ifndef CONFIG_CPU_DCACHE_DISABLE
+	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a D-cache line
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB before the first line op
+1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB (presumably an ARM1020 requirement after each line op — confirm vs TRM)
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b				@ loop over every line in the area
+#endif
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
@@ -331,6 +331,27 @@ ENTRY(arm1020e_dma_flush_range)
mov pc, lr
/*
+ * iommu_flush_area(start, size)
+ *
+ * Clean and invalidate the specified virtual address area.
+ *
+ * - start - virtual start address
+ * - size - size of region
+ */
+ENTRY(arm1020e_iommu_flush_area)
+	add	r1, r1, r0			@ r1 = end address (start + size)
+	mov	ip, #0
+#ifndef CONFIG_CPU_DCACHE_DISABLE
+	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a D-cache line
+1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b				@ loop over every line in the area
+#endif
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
@@ -320,6 +320,27 @@ ENTRY(arm1022_dma_flush_range)
mov pc, lr
/*
+ * iommu_flush_area(start, size)
+ *
+ * Clean and invalidate the specified virtual address area.
+ *
+ * - start - virtual start address
+ * - size - size of region
+ */
+ENTRY(arm1022_iommu_flush_area)
+	add	r1, r1, r0			@ r1 = end address (start + size)
+	mov	ip, #0
+#ifndef CONFIG_CPU_DCACHE_DISABLE
+	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a D-cache line
+1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b				@ loop over every line in the area
+#endif
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
@@ -314,6 +314,27 @@ ENTRY(arm1026_dma_flush_range)
 	mov	pc, lr
 /*
+ * iommu_flush_area(start, size)
+ *
+ * Clean and invalidate the specified virtual address area.
+ *
+ * - start - virtual start address
+ * - size - size of region
+ */
+ENTRY(arm1026_iommu_flush_area)
+	add	r1, r1, r0			@ r1 = end address; was missing — loop compared against raw size
+	mov	ip, #0
+#ifndef CONFIG_CPU_DCACHE_DISABLE
+	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a D-cache line
+1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b				@ loop over every line in the area
+#endif
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
@@ -296,6 +296,24 @@ ENTRY(arm920_dma_flush_range)
mov pc, lr
/*
+ * iommu_flush_area(start, size)
+ *
+ * Clean and invalidate the specified virtual address area.
+ *
+ * - start - virtual start address
+ * - size - size of region
+ */
+ENTRY(arm920_iommu_flush_area)
+	add	r1, r1, r0			@ r1 = end address (start + size)
+	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a D-cache line
+1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b				@ loop over every line in the area
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
@@ -298,6 +298,24 @@ ENTRY(arm922_dma_flush_range)
mov pc, lr
/*
+ * iommu_flush_area(start, size)
+ *
+ * Clean and invalidate the specified virtual address area.
+ *
+ * - start - virtual start address
+ * - size - size of region
+ */
+ENTRY(arm922_iommu_flush_area)
+	add	r1, r1, r0			@ r1 = end address (start + size)
+	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a D-cache line
+1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b				@ loop over every line in the area
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
@@ -353,6 +353,29 @@ ENTRY(arm925_dma_flush_range)
mov pc, lr
/*
+ * iommu_flush_area(start, size)
+ *
+ * Clean and invalidate the specified virtual address area.
+ *
+ * - start - virtual start address
+ * - size - size of region
+ */
+ENTRY(arm925_iommu_flush_area)
+	add	r1, r1, r0			@ r1 = end address (start + size)
+	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a D-cache line
+1:
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
+#else
+	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry (write-through: no clean needed)
+#endif
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b				@ loop over every line in the area
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
@@ -316,6 +316,29 @@ ENTRY(arm926_dma_flush_range)
mov pc, lr
/*
+ * iommu_flush_area(start, size)
+ *
+ * Clean and invalidate the specified virtual address area.
+ *
+ * - start - virtual start address
+ * - size - size of region
+ */
+ENTRY(arm926_iommu_flush_area)
+	add	r1, r1, r0			@ r1 = end address (start + size)
+	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a D-cache line
+1:
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
+#else
+	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry (write-through: no clean needed)
+#endif
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b				@ loop over every line in the area
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
@@ -244,6 +244,31 @@ ENTRY(arm940_dma_flush_range)
 	mov	pc, lr
 /*
+ * iommu_flush_area(start, size)
+ *
+ * Clean and invalidate the whole D cache (ARM940 has no VA range ops).
+ *
+ * - start - virtual start address (ignored)
+ * - size - size of region (ignored)
+ */
+ENTRY(arm940_iommu_flush_area)
+	mov	ip, #0
+	mov	r1, #(CACHE_DSEGMENTS - 1) << 4		@ 4 segments
+1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ 64 entries
+2:
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D entry
+#else
+	mcr	p15, 0, r3, c7, c6, 2		@ invalidate D entry
+#endif
+	subs	r3, r3, #1 << 26
+	bcs	2b				@ entries 63 to 0
+	subs	r1, r1, #1 << 4
+	bcs	1b				@ segments 3 to 0
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
@@ -287,6 +287,31 @@ ENTRY(arm946_dma_flush_range)
mov pc, lr
/*
+ * iommu_flush_area(start, size)
+ *
+ * Clean and invalidate the specified virtual address area.
+ *
+ * - start - virtual start address
+ * - size - size of region
+ *
+ * (same as arm926)
+ */
+ENTRY(arm946_iommu_flush_area)
+	add	r1, r1, r0			@ r1 = end address (start + size)
+	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a D-cache line
+1:
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
+#else
+	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry (write-through: no clean needed)
+#endif
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b				@ loop over every line in the area
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
@@ -364,6 +364,25 @@ ENTRY(feroceon_dma_flush_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
+/*
+ * iommu_flush_area(start, size)
+ *
+ * Clean and invalidate the specified virtual address area.
+ *
+ * - start - virtual start address
+ * - size - size of region
+ */
+	.align	5				@ cache-line align the loop
+ENTRY(feroceon_iommu_flush_area)
+	add	r1, r1, r0			@ r1 = end address (start + size)
+	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a D-cache line
+1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b				@ loop over every line in the area
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
.align 5
ENTRY(feroceon_range_dma_flush_range)
mrs r2, cpsr
@@ -377,6 +396,19 @@ ENTRY(feroceon_range_dma_flush_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
+	.align	5				@ cache-line align the routine
+ENTRY(feroceon_range_iommu_flush_area)
+	add	r1, r1, r0			@ r1 = end address (start + size)
+	mrs	r2, cpsr			@ save flags/IRQ state
+	cmp	r1, r0				@ size == 0?
+	subne	r1, r1, #1			@ top address is inclusive
+	orr	r3, r2, #PSR_I_BIT
+	msr	cpsr_c, r3			@ disable interrupts
+	mcr	p15, 5, r0, c15, c15, 0		@ D clean/inv range start
+	mcr	p15, 5, r1, c15, c15, 1		@ D clean/inv range top
+	msr	cpsr_c, r2			@ restore interrupts
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	pc, lr
/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
@@ -307,6 +307,7 @@ ENTRY(\name\()_cache_fns)
.long \name\()_dma_map_area
.long \name\()_dma_unmap_area
.long \name\()_dma_flush_range
+ .long \name\()_iommu_flush_area
.size \name\()_cache_fns, . - \name\()_cache_fns
.endm
@@ -280,6 +280,25 @@ ENTRY(mohawk_dma_flush_range)
mov pc, lr
/*
+ * iommu_flush_area(start, size)
+ *
+ * Clean and invalidate the specified virtual address area.
+ *
+ * - start - virtual start address
+ * - size - size of region
+ */
+ENTRY(mohawk_iommu_flush_area)
+	add	r1, r1, r0			@ r1 = end address (start + size)
+	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a D-cache line
+1:
+	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b				@ loop over every line in the area
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
@@ -314,6 +314,24 @@ ENTRY(xsc3_dma_flush_range)
mov pc, lr
/*
+ * iommu_flush_area(start, size)
+ *
+ * Clean and invalidate the specified virtual address area.
+ *
+ * - start - virtual start address
+ * - size - size of region
+ */
+ENTRY(xsc3_iommu_flush_area)
+	add	r1, r1, r0			@ r1 = end address (start + size)
+	bic	r0, r0, #CACHELINESIZE - 1	@ align start down to a cache line
+1:	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
+	add	r0, r0, #CACHELINESIZE
+	cmp	r0, r1
+	blo	1b				@ loop over every line in the area
+	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
+	mov	pc, lr
+
+/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
@@ -374,6 +374,25 @@ ENTRY(xscale_dma_flush_range)
mov pc, lr
/*
+ * iommu_flush_area(start, size)
+ *
+ * Clean and invalidate the specified virtual address area.
+ *
+ * - start - virtual start address
+ * - size - size of region
+ */
+ENTRY(xscale_iommu_flush_area)
+	add	r1, r1, r0			@ r1 = end address (start + size)
+	bic	r0, r0, #CACHELINESIZE - 1	@ align start down to a cache line
+1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
+	add	r0, r0, #CACHELINESIZE
+	cmp	r0, r1
+	blo	1b				@ loop over every line in the area
+	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
+	mov	pc, lr
+
+/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
@@ -445,6 +464,7 @@ ENDPROC(xscale_dma_unmap_area)
a0_alias flush_kern_dcache_area
a0_alias dma_flush_range
a0_alias dma_unmap_area
+ a0_alias iommu_flush_area
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions xscale_80200_A0_A1