--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -9,5 +9,6 @@
#include <asm/special_insns.h>

void clflush_cache_range(void *addr, unsigned int size);
+void clean_cache_range(void *vaddr, unsigned int size);

#endif /* _ASM_X86_CACHEFLUSH_H */
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -57,27 +57,6 @@ unsigned long clear_user(void __user *to, unsigned long n)
EXPORT_SYMBOL(clear_user);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
-/**
- * clean_cache_range - write back a cache range with CLWB
- * @vaddr: virtual start address
- * @size: number of bytes to write back
- *
- * Write back a cache range using the CLWB (cache line write back)
- * instruction. Note that @size is internally rounded up to be cache
- * line size aligned.
- */
-static void clean_cache_range(void *addr, size_t size)
-{
- u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
- unsigned long clflush_mask = x86_clflush_size - 1;
- void *vend = addr + size;
- void *p;
-
- for (p = (void *)((unsigned long)addr & ~clflush_mask);
- p < vend; p += x86_clflush_size)
- clwb(p);
-}
-
void arch_wb_cache_pmem(void *addr, size_t size)
{
clean_cache_range(addr, size);
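For reference, both the loop removed above and its replacement added below round the start address down to a cache-line boundary, so a range that straddles line boundaries is written back in full. A standalone userspace sketch of that round-down arithmetic, with a hard-coded 64-byte line standing in for boot_cpu_data.x86_clflush_size:

/* Illustration only: count how many cache lines a byte range touches
 * after rounding the start down to a line boundary (64-byte line
 * assumed; the kernel reads the real size from boot_cpu_data).
 */
#include <stdio.h>

int main(void)
{
	const unsigned long line = 64;		/* assumed x86_clflush_size */
	unsigned long addr = 0x1003c;		/* unaligned start address */
	unsigned long size = 8;			/* 8 bytes straddling two lines */
	unsigned long p = addr & ~(line - 1);	/* round down -> 0x10000 */
	unsigned long vend = addr + size;	/* one past the last byte */
	unsigned int n = 0;

	for (; p < vend; p += line)		/* same walk as the clwb loop */
		n++;

	printf("%u lines\n", n);		/* prints "2 lines" */
	return 0;
}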
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -319,6 +319,36 @@ void clflush_cache_range(void *vaddr, unsigned int size)
}
EXPORT_SYMBOL_GPL(clflush_cache_range);

+static void clean_cache_range_opt(void *vaddr, unsigned int size)
+{
+ const unsigned long clflush_size = boot_cpu_data.x86_clflush_size;
+ void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
+ void *vend = vaddr + size;
+
+ if (p >= vend)
+ return;
+
+ for (; p < vend; p += clflush_size)
+ clwb(p);
+}
+
+/**
+ * clean_cache_range - write back a cache range with CLWB
+ * @vaddr: virtual start address
+ * @size: number of bytes to write back
+ *
+ * CLWB (cache line write back) is an unordered instruction which needs fencing
+ * with MFENCE or SFENCE to avoid ordering issues. Note that @size is
+ * internally rounded up to be cache line size aligned.
+ */
+void clean_cache_range(void *vaddr, unsigned int size)
+{
+ mb();
+ clean_cache_range_opt(vaddr, size);
+ mb();
+}
+EXPORT_SYMBOL_GPL(clean_cache_range);
+
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_invalidate_pmem(void *addr, size_t size)
{
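For context, a minimal sketch of how a caller might use the newly exported helper; example_publish() and its pmem_dst argument are hypothetical and not part of this patch:

#include <linux/string.h>
#include <asm/cacheflush.h>

/* Hypothetical caller: copy data into a pmem-backed mapping, then
 * write the dirty lines back with CLWB so they reach the media while
 * staying cached (unlike clflush_cache_range(), which invalidates).
 */
static void example_publish(void *pmem_dst, const void *src, unsigned int len)
{
	memcpy(pmem_dst, src, len);
	clean_cache_range(pmem_dst, len);
}

Because clean_cache_range() brackets the CLWB walk with mb(), such a caller needs no fence of its own to order the write-backs against surrounding stores.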
Export clean_cache_range(), which is similar to the existing
clflush_cache_range() but uses the CLWB (cache line write back)
instruction instead of CLFLUSH.

Remove the existing static implementation of clean_cache_range() from
arch/x86/lib/usercopy_64.c.

Signed-off-by: Dov Murik <dovmurik@linux.ibm.com>
---
 arch/x86/include/asm/cacheflush.h |  1 +
 arch/x86/lib/usercopy_64.c        | 21 ---------------------
 arch/x86/mm/pat/set_memory.c      | 30 ++++++++++++++++++++++++++++++
 3 files changed, 31 insertions(+), 21 deletions(-)