[5/7] arm64: start using 'asm goto' for get_user() when available

Message ID 20240610204821.230388-6-torvalds@linux-foundation.org (mailing list archive)
State New, archived
Series arm64 / x86-64: low-level code generation issues

Commit Message

Linus Torvalds June 10, 2024, 8:48 p.m. UTC
This generates noticeably better code with compilers that support it,
since we don't need to test an error register after the access; the
exception fixup just jumps to the error handling directly.
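
Schematically (a sketch based on the macros in this patch, not
compilable on its own), the fallback has to funnel the fault through
an error register that the C side then tests:

	int err = 0;
	asm volatile(
	"1:	ldtr	%w1, [%2]\n"
	"2:\n"
	/* extable fixup writes -EFAULT into err (%w0) */
	: "+r" (err), "=r" (val)
	: "r" (addr));
	if (err)
		goto fail;

while with 'asm goto' the extable entry makes the exception handler
branch straight to the caller's label, so the success path carries no
test at all:

	asm_goto_output(
	"1:	ldtr	%w0, [%1]\n"
	/* extable fixup branches to the fail label (%l2) */
	: "=r" (val)
	: "r" (addr) : : fail);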

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---
 arch/arm64/include/asm/uaccess.h | 113 ++++++++++++++++++++++++-------
 arch/arm64/kernel/mte.c          |  12 ++--
 2 files changed, 95 insertions(+), 30 deletions(-)
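
For context, the new label-based accessors follow the usual
user_access_begin()/user_access_end() pattern. A minimal sketch of a
caller (read_one_word() is a made-up example, not part of this patch):

	static int read_one_word(const u32 __user *uptr, u32 *out)
	{
		u32 val;

		if (!user_access_begin(uptr, sizeof(*uptr)))
			return -EFAULT;
		unsafe_get_user(val, uptr, Efault);
		user_access_end();
		*out = val;
		return 0;

	Efault:
		user_access_end();
		return -EFAULT;
	}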

Patch

diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 14be5000c5a0..23c2edf517ed 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -184,29 +184,40 @@  static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
  * The "__xxx_error" versions set the third argument to -EFAULT if an error
  * occurs, and leave it unchanged on success.
  */
-#define __get_mem_asm(load, reg, x, addr, err, type)			\
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+#define __get_mem_asm(load, reg, x, addr, label, type)			\
+	asm_goto_output(						\
+	"1:	" load "	" reg "0, [%1]\n"			\
+	_ASM_EXTABLE_##type##ACCESS_ERR(1b, %l2, %w0)			\
+	: "=r" (x)							\
+	: "r" (addr) : : label)
+#else
+#define __get_mem_asm(load, reg, x, addr, label, type) do {		\
+	int __gma_err = 0;						\
 	asm volatile(							\
 	"1:	" load "	" reg "1, [%2]\n"			\
 	"2:\n"								\
 	_ASM_EXTABLE_##type##ACCESS_ERR_ZERO(1b, 2b, %w0, %w1)		\
-	: "+r" (err), "=r" (x)						\
-	: "r" (addr))
+	: "+r" (__gma_err), "=r" (x)					\
+	: "r" (addr));							\
+	if (__gma_err) goto label; } while (0)
+#endif
 
-#define __raw_get_mem(ldr, x, ptr, err, type)					\
+#define __raw_get_mem(ldr, x, ptr, label, type)					\
 do {										\
 	unsigned long __gu_val;							\
 	switch (sizeof(*(ptr))) {						\
 	case 1:									\
-		__get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err), type);	\
+		__get_mem_asm(ldr "b", "%w", __gu_val, (ptr), label, type);	\
 		break;								\
 	case 2:									\
-		__get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err), type);	\
+		__get_mem_asm(ldr "h", "%w", __gu_val, (ptr), label, type);	\
 		break;								\
 	case 4:									\
-		__get_mem_asm(ldr, "%w", __gu_val, (ptr), (err), type);		\
+		__get_mem_asm(ldr, "%w", __gu_val, (ptr), label, type);		\
 		break;								\
 	case 8:									\
-		__get_mem_asm(ldr, "%x",  __gu_val, (ptr), (err), type);	\
+		__get_mem_asm(ldr, "%x",  __gu_val, (ptr), label, type);	\
 		break;								\
 	default:								\
 		BUILD_BUG();							\
@@ -219,27 +230,34 @@  do {										\
  * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
  * we must evaluate these outside of the critical section.
  */
-#define __raw_get_user(x, ptr, err)					\
+#define __raw_get_user(x, ptr, label)					\
 do {									\
 	__typeof__(*(ptr)) __user *__rgu_ptr = (ptr);			\
 	__typeof__(x) __rgu_val;					\
 	__chk_user_ptr(ptr);						\
-									\
-	uaccess_ttbr0_enable();						\
-	__raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err, U);		\
-	uaccess_ttbr0_disable();					\
-									\
-	(x) = __rgu_val;						\
+	do {								\
+		__label__ __rgu_failed;					\
+		uaccess_ttbr0_enable();					\
+		__raw_get_mem("ldtr", __rgu_val, __rgu_ptr, __rgu_failed, U);	\
+		uaccess_ttbr0_disable();				\
+		(x) = __rgu_val;					\
+		break;							\
+	__rgu_failed:							\
+		uaccess_ttbr0_disable();				\
+		goto label;						\
+	} while (0);							\
 } while (0)
 
 #define __get_user_error(x, ptr, err)					\
 do {									\
+	__label__ __gu_failed;						\
 	__typeof__(*(ptr)) __user *__p = (ptr);				\
 	might_fault();							\
 	if (access_ok(__p, sizeof(*__p))) {				\
 		__p = uaccess_mask_ptr(__p);				\
-		__raw_get_user((x), __p, (err));			\
+		__raw_get_user((x), __p, __gu_failed);			\
 	} else {							\
+	__gu_failed:							\
 		(x) = (__force __typeof__(x))0; (err) = -EFAULT;	\
 	}								\
 } while (0)
@@ -262,15 +280,18 @@  do {									\
 do {									\
 	__typeof__(dst) __gkn_dst = (dst);				\
 	__typeof__(src) __gkn_src = (src);				\
-	int __gkn_err = 0;						\
+	do { 								\
+		__label__ __gkn_label;					\
 									\
-	__mte_enable_tco_async();					\
-	__raw_get_mem("ldr", *((type *)(__gkn_dst)),			\
-		      (__force type *)(__gkn_src), __gkn_err, K);	\
-	__mte_disable_tco_async();					\
-									\
-	if (unlikely(__gkn_err))					\
+		__mte_enable_tco_async();				\
+		__raw_get_mem("ldr", *((type *)(__gkn_dst)),		\
+		      (__force type *)(__gkn_src), __gkn_label, K);	\
+		__mte_disable_tco_async();				\
+		break;							\
+	__gkn_label:							\
+		__mte_disable_tco_async();				\
 		goto err_label;						\
+	} while (0);							\
 } while (0)
 
 #define __put_mem_asm(store, reg, x, addr, err, type)			\
@@ -381,6 +402,52 @@  extern unsigned long __must_check __arch_copy_to_user(void __user *to, const voi
 	__actu_ret;							\
 })
 
+static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
+{
+	if (unlikely(!access_ok(ptr,len)))
+		return 0;
+	uaccess_ttbr0_enable();
+	return 1;
+}
+#define user_access_begin(a,b)	user_access_begin(a,b)
+#define user_access_end()	uaccess_ttbr0_disable()
+
+/*
+ * The arm64 inline asms should learn about asm goto, and we should
+ * teach user_access_begin() about address masking.
+ */
+#define unsafe_put_user(x, ptr, label)	do {				\
+	int __upu_err = 0;						\
+	__raw_put_mem("sttr", x, uaccess_mask_ptr(ptr), __upu_err, U);	\
+	if (__upu_err) goto label;				\
+} while (0)
+
+#define unsafe_get_user(x, ptr, label) \
+	__raw_get_mem("ldtr", x, uaccess_mask_ptr(ptr), label, U)
+
+/*
+ * We want the unsafe accessors to always be inlined and use
+ * the error labels - thus the macro games.
+ */
+#define unsafe_copy_loop(dst, src, len, type, label)				\
+	while (len >= sizeof(type)) {						\
+		unsafe_put_user(*(type *)(src),(type __user *)(dst),label);	\
+		dst += sizeof(type);						\
+		src += sizeof(type);						\
+		len -= sizeof(type);						\
+	}
+
+#define unsafe_copy_to_user(_dst,_src,_len,label)			\
+do {									\
+	char __user *__ucu_dst = (_dst);				\
+	const char *__ucu_src = (_src);					\
+	size_t __ucu_len = (_len);					\
+	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
+	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
+	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
+	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
+} while (0)
+
 #define INLINE_COPY_TO_USER
 #define INLINE_COPY_FROM_USER
 
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index dcdcccd40891..6174671be7c1 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -582,12 +582,9 @@  subsys_initcall(register_mte_tcf_preferred_sysctl);
 size_t mte_probe_user_range(const char __user *uaddr, size_t size)
 {
 	const char __user *end = uaddr + size;
-	int err = 0;
 	char val;
 
-	__raw_get_user(val, uaddr, err);
-	if (err)
-		return size;
+	__raw_get_user(val, uaddr, efault);
 
 	uaddr = PTR_ALIGN(uaddr, MTE_GRANULE_SIZE);
 	while (uaddr < end) {
@@ -595,12 +592,13 @@  size_t mte_probe_user_range(const char __user *uaddr, size_t size)
 		 * A read is sufficient for mte, the caller should have probed
 		 * for the pte write permission if required.
 		 */
-		__raw_get_user(val, uaddr, err);
-		if (err)
-			return end - uaddr;
+		__raw_get_user(val, uaddr, efault);
 		uaddr += MTE_GRANULE_SIZE;
 	}
 	(void)val;
 
 	return 0;
+
+efault:
+	return end - uaddr;
 }
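
Aside: the do { __label__ ...; } while (0) blocks above rely on the
GNU C local-label extension; on success execution breaks out of the
inner block past the failure path, and on failure the cleanup runs
before jumping to the caller's label. A stand-alone sketch of that
control flow (try_read()/cleanup() are hypothetical stand-ins, plain
C, builds with gcc/clang):

	/* Fails when src is NULL, standing in for a faulting access. */
	static int try_read(int *dst, const int *src)
	{
		if (!src)
			return -1;
		*dst = *src;
		return 0;
	}

	static void cleanup(void)
	{
		/* e.g. uaccess_ttbr0_disable() in the real code */
	}

	#define read_or_fail(x, src, err_label) do {		\
		__label__ local_fail;				\
		if (try_read(&(x), (src)))			\
			goto local_fail;			\
		break;			/* success path */	\
	local_fail:						\
		cleanup();	/* only on the failure path */	\
		goto err_label;					\
	} while (0)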