[v13,5/5] arm64: introduce copy_mc_to_kernel() implementation

Message ID 20241209024257.3618492-6-tongtiangen@huawei.com (mailing list archive)
State New, archived
Series arm64: add ARCH_HAS_COPY_MC support

Commit Message

Tong Tiangen Dec. 9, 2024, 2:42 a.m. UTC
The copy_mc_to_kernel() helper is a memory copy implementation that
handles source exceptions. It can be used in memory copy scenarios that
tolerate hardware memory errors (e.g. pmem_read/dax_copy_to_iter).

Currently, only x86 and ppc support this helper. Add it for arm64 as
well when ARCH_HAS_COPY_MC is defined, by implementing the
copy_mc_to_kernel() and memcpy_mc() functions.

Because no caller-saved GPR is available for saving the "bytes not
copied" count in memcpy(), memcpy_mc() is based on the implementation
of copy_from_user(). In addition, fixup of the MOPS instructions is not
handled for now.

Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
---
 arch/arm64/include/asm/string.h  |  5 ++
 arch/arm64/include/asm/uaccess.h | 18 ++++++
 arch/arm64/lib/Makefile          |  2 +-
 arch/arm64/lib/memcpy_mc.S       | 98 ++++++++++++++++++++++++++++++++
 mm/kasan/shadow.c                | 12 ++++
 5 files changed, 134 insertions(+), 1 deletion(-)
 create mode 100644 arch/arm64/lib/memcpy_mc.S
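
For context, here is a minimal, hypothetical sketch of how a pmem/dax-style
caller might consume the return value of copy_mc_to_kernel(); the helper name
example_read_from_pmem() is made up for illustration and is not part of this
series:

#include <linux/uaccess.h>
#include <linux/errno.h>

/*
 * Hypothetical caller, for illustration only: copy from a
 * possibly-poisoned source and turn a partial copy into -EIO.
 */
static int example_read_from_pmem(void *dst, const void *pmem_addr, size_t len)
{
	unsigned long rem;

	/* Returns 0 on success, or the number of bytes not copied. */
	rem = copy_mc_to_kernel(dst, pmem_addr, len);
	if (rem)
		return -EIO;	/* only len - rem bytes were copied */

	return 0;
}

This mirrors the pattern used by existing callers such as the pmem driver's
read path.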

Comments

Catalin Marinas Feb. 12, 2025, 5:18 p.m. UTC | #1
On Mon, Dec 09, 2024 at 10:42:57AM +0800, Tong Tiangen wrote:
> The copy_mc_to_kernel() helper is a memory copy implementation that
> handles source exceptions. It can be used in memory copy scenarios that
> tolerate hardware memory errors (e.g. pmem_read/dax_copy_to_iter).
> 
> Currently, only x86 and ppc support this helper. Add it for arm64 as
> well when ARCH_HAS_COPY_MC is defined, by implementing the
> copy_mc_to_kernel() and memcpy_mc() functions.
> 
> Because no caller-saved GPR is available for saving the "bytes not
> copied" count in memcpy(), memcpy_mc() is based on the implementation
> of copy_from_user(). In addition, fixup of the MOPS instructions is not
> handled for now.

Same question as on the previous patch: can we not avoid the memcpy()
duplication if the only difference is the entries in the exception table?
IIUC, in patch 2 fixup_exception() even ignores the new type. The error
must come via the do_sea() path.
Tong Tiangen Feb. 14, 2025, 2:57 a.m. UTC | #2
On 2025/2/13 1:18, Catalin Marinas wrote:
> On Mon, Dec 09, 2024 at 10:42:57AM +0800, Tong Tiangen wrote:
>> The copy_mc_to_kernel() helper is a memory copy implementation that
>> handles source exceptions. It can be used in memory copy scenarios that
>> tolerate hardware memory errors (e.g. pmem_read/dax_copy_to_iter).
>>
>> Currently, only x86 and ppc support this helper. Add it for arm64 as
>> well when ARCH_HAS_COPY_MC is defined, by implementing the
>> copy_mc_to_kernel() and memcpy_mc() functions.
>>
>> Because no caller-saved GPR is available for saving the "bytes not
>> copied" count in memcpy(), memcpy_mc() is based on the implementation
>> of copy_from_user(). In addition, fixup of the MOPS instructions is not
>> handled for now.
> 
> Same question as on the previous patch: can we not avoid the memcpy()
> duplication if the only difference is the entries in the exception table?
> IIUC, in patch 2 fixup_exception() even ignores the new type. The error
> must come via the do_sea() path.

As I said in the commit message, it is not unified with memcpy()
because of the lack of a spare GPR. If there were no GPR shortage, we
could extract the common code of memcpy_mc() and memcpy(); the part
that cannot be shared is the code using different exception table
entries.

Thanks,
Tong.
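
[Editorial note] For readers following this exchange, a rough C-level
rendering of what the generic path in memcpy_mc.S (in the patch below) does
may help. It is illustrative only; the real implementation is the assembly in
this series, and memcpy_mc_concept()/source_load_faulted() are made-up names:

#include <stddef.h>
#include <stdbool.h>

/*
 * Stub standing in for a hardware memory error on the source; in the
 * real code this is a synchronous external abort fixed up through the
 * KERNEL_MEM_ERR exception table entries on the load instructions.
 */
static bool source_load_faulted(const char *p)
{
	(void)p;
	return false;
}

/*
 * Conceptual, byte-at-a-time model of __memcpy_mc_generic: "end" plays
 * the role of the extra register (x5) that plain memcpy() cannot
 * spare, which is why the copy loop is duplicated rather than shared.
 */
static unsigned long memcpy_mc_concept(void *dst, const void *src, size_t n)
{
	char *d = dst, *end = d + n;
	const char *s = src;

	while (d != end) {
		if (source_load_faulted(s))
			return (unsigned long)(end - d);	/* bytes not copied */
		*d++ = *s++;
	}
	return 0;					/* nothing left to copy */
}

Note that only the load macros in the patch carry KERNEL_MEM_ERR fixups,
since the poison is consumed on the source side of the copy.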


Patch

diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h
index 3a3264ff47b9..23eca4fb24fa 100644
--- a/arch/arm64/include/asm/string.h
+++ b/arch/arm64/include/asm/string.h
@@ -35,6 +35,10 @@  extern void *memchr(const void *, int, __kernel_size_t);
 extern void *memcpy(void *, const void *, __kernel_size_t);
 extern void *__memcpy(void *, const void *, __kernel_size_t);
 
+#define __HAVE_ARCH_MEMCPY_MC
+extern int memcpy_mc(void *, const void *, __kernel_size_t);
+extern int __memcpy_mc(void *, const void *, __kernel_size_t);
+
 #define __HAVE_ARCH_MEMMOVE
 extern void *memmove(void *, const void *, __kernel_size_t);
 extern void *__memmove(void *, const void *, __kernel_size_t);
@@ -57,6 +61,7 @@  void memcpy_flushcache(void *dst, const void *src, size_t cnt);
  */
 
 #define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memcpy_mc(dst, src, len) __memcpy_mc(dst, src, len)
 #define memmove(dst, src, len) __memmove(dst, src, len)
 #define memset(s, c, n) __memset(s, c, n)
 
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 5b91803201ef..2a14b732306a 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -542,4 +542,22 @@  static inline void put_user_gcs(unsigned long val, unsigned long __user *addr,
 
 #endif /* CONFIG_ARM64_GCS */
 
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+/**
+ * copy_mc_to_kernel - memory copy that handles source exceptions
+ *
+ * @to:		destination address
+ * @from:	source address
+ * @size:	number of bytes to copy
+ *
+ * Return 0 for success, or bytes not copied.
+ */
+static inline unsigned long __must_check
+copy_mc_to_kernel(void *to, const void *from, unsigned long size)
+{
+	return memcpy_mc(to, from, size);
+}
+#define copy_mc_to_kernel copy_mc_to_kernel
+#endif
+
 #endif /* __ASM_UACCESS_H */
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 78b0e9904689..326d71ba0517 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -13,7 +13,7 @@  endif
 
 lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
 
-lib-$(CONFIG_ARCH_HAS_COPY_MC) += copy_mc_page.o
+lib-$(CONFIG_ARCH_HAS_COPY_MC) += copy_mc_page.o memcpy_mc.o
 
 obj-$(CONFIG_CRC32) += crc32.o crc32-glue.o
 
diff --git a/arch/arm64/lib/memcpy_mc.S b/arch/arm64/lib/memcpy_mc.S
new file mode 100644
index 000000000000..cb9caaa1ab0b
--- /dev/null
+++ b/arch/arm64/lib/memcpy_mc.S
@@ -0,0 +1,98 @@ 
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ *
+ * This code is based on glibc cortex strings work originally authored by Linaro
+ * be found @
+ *
+ * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
+ * files/head:/src/aarch64/
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/cache.h>
+#include <asm/asm-uaccess.h>
+
+/*
+ * Copy a buffer from src to dest (alignment handled by the hardware)
+ *
+ * Parameters:
+ *	x0 - dest
+ *	x1 - src
+ *	x2 - n
+ * Returns:
+ *	x0 - bytes not copied
+ */
+	.macro ldrb1 reg, ptr, val
+	KERNEL_MEM_ERR(9997f, ldrb  \reg, [\ptr], \val)
+	.endm
+
+	.macro strb1 reg, ptr, val
+	strb \reg, [\ptr], \val
+	.endm
+
+	.macro ldrh1 reg, ptr, val
+	KERNEL_MEM_ERR(9997f, ldrh  \reg, [\ptr], \val)
+	.endm
+
+	.macro strh1 reg, ptr, val
+	strh \reg, [\ptr], \val
+	.endm
+
+	.macro ldr1 reg, ptr, val
+	KERNEL_MEM_ERR(9997f, ldr \reg, [\ptr], \val)
+	.endm
+
+	.macro str1 reg, ptr, val
+	str \reg, [\ptr], \val
+	.endm
+
+	.macro ldp1 reg1, reg2, ptr, val
+	KERNEL_MEM_ERR(9997f, ldp \reg1, \reg2, [\ptr], \val)
+	.endm
+
+	.macro stp1 reg1, reg2, ptr, val
+	stp \reg1, \reg2, [\ptr], \val
+	.endm
+
+end	.req	x5
+SYM_FUNC_START(__memcpy_mc_generic)
+	add	end, x0, x2
+#include "copy_template.S"
+	mov	x0, #0				// Nothing to copy
+	ret
+
+	// Exception fixups
+9997:	sub	x0, end, dst			// bytes not copied
+	ret
+SYM_FUNC_END(__memcpy_mc_generic)
+
+#ifdef CONFIG_AS_HAS_MOPS
+	.arch_extension mops
+SYM_FUNC_START(__memcpy_mc)
+alternative_if_not ARM64_HAS_MOPS
+	b       __memcpy_mc_generic
+alternative_else_nop_endif
+
+dstin   .req    x0
+src     .req    x1
+count   .req    x2
+dst     .req    x3
+
+	mov     dst, dstin
+	cpyp    [dst]!, [src]!, count!
+	cpym    [dst]!, [src]!, count!
+	cpye    [dst]!, [src]!, count!
+
+	mov	x0, #0				// Nothing to copy
+	ret
+SYM_FUNC_END(__memcpy_mc)
+#else
+SYM_FUNC_ALIAS(__memcpy_mc, __memcpy_mc_generic)
+#endif
+
+EXPORT_SYMBOL(__memcpy_mc)
+SYM_FUNC_ALIAS_WEAK(memcpy_mc, __memcpy_mc)
+EXPORT_SYMBOL(memcpy_mc)
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 88d1c9dcb507..a12770fb2e9c 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -79,6 +79,18 @@  void *memcpy(void *dest, const void *src, size_t len)
 }
 #endif
 
+#ifdef __HAVE_ARCH_MEMCPY_MC
+#undef memcpy_mc
+int memcpy_mc(void *dest, const void *src, size_t len)
+{
+	if (!kasan_check_range(src, len, false, _RET_IP_) ||
+	    !kasan_check_range(dest, len, true, _RET_IP_))
+		return (int)len;
+
+	return __memcpy_mc(dest, src, len);
+}
+#endif
+
 void *__asan_memset(void *addr, int c, ssize_t len)
 {
 	if (!kasan_check_range(addr, len, true, _RET_IP_))