@@ -464,6 +464,9 @@ copy_mc_to_user(void *to, const void *from, unsigned len);
unsigned long __must_check
copy_mc_fragile(void *dst, const void *src, unsigned cnt);
+
+unsigned long __must_check
+copy_mc_generic(void *dst, const void *src, unsigned cnt);
#else
static inline void enable_copy_mc_fragile(void)
{
@@ -23,7 +23,7 @@ void enable_copy_mc_fragile(void)
*
* Call into the 'fragile' version on systems that have trouble
* actually doing machine check recovery. Everyone else can just
- * use memcpy().
+ * use copy_mc_generic().
*
* Return 0 for success, or number of bytes not copied if there was an
* exception.
@@ -33,8 +33,7 @@ copy_mc_to_kernel(void *dst, const void *src, unsigned cnt)
{
if (static_branch_unlikely(&copy_mc_fragile_key))
return copy_mc_fragile(dst, src, cnt);
- memcpy(dst, src, cnt);
- return 0;
+ return copy_mc_generic(dst, src, cnt);
}
EXPORT_SYMBOL_GPL(copy_mc_to_kernel);
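
[Annotation: copy_mc_to_kernel() returns zero on success, or the number of
bytes left uncopied when an exception was taken. A minimal caller sketch;
the names dst_buf, pmem_src, and len are illustrative only, not part of
this patch:

	/* Hypothetical caller: tolerate poison when reading from pmem */
	unsigned long rem = copy_mc_to_kernel(dst_buf, pmem_src, len);

	if (rem)
		return -EIO;	/* trailing 'rem' bytes were not copied */
]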
@@ -56,11 +55,11 @@ copy_mc_to_user(void *to, const void *from, unsigned len)
{
unsigned long ret;
- if (!static_branch_unlikely(&copy_mc_fragile_key))
- return copy_user_generic(to, from, len);
-
__uaccess_begin();
- ret = copy_mc_fragile(to, from, len);
+ if (static_branch_unlikely(&copy_mc_fragile_key))
+ 	ret = copy_mc_fragile(to, from, len);
+ else
+ 	ret = copy_mc_generic(to, from, len);
__uaccess_end();
return ret;
}
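
[Annotation: copy_mc_to_user() follows the usual uaccess convention that
the return value is the number of bytes not copied. A hedged sketch of a
read(2)-style consumer; ubuf, kbuf, and len are illustrative names:

	/* Hypothetical read-path fragment */
	unsigned long rem = copy_mc_to_user(ubuf, kbuf, len);

	/* report a short read rather than failing outright */
	return len - rem;
]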
@@ -2,7 +2,9 @@
/* Copyright(c) 2016-2020 Intel Corporation. All rights reserved. */
#include <linux/linkage.h>
+#include <asm/alternative-asm.h>
#include <asm/copy_mc_test.h>
+#include <asm/cpufeatures.h>
#include <asm/export.h>
#include <asm/asm.h>
@@ -122,4 +124,42 @@ EXPORT_SYMBOL_GPL(copy_mc_fragile)
_ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
_ASM_EXTABLE(.L_write_words, .E_write_words)
_ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
+
+/*
+ * copy_mc_generic - memory copy with exception handling
+ *
+ * Fast string copy + fault / exception handling. If the CPU does
+ * support machine check exception recovery, but does not support
+ * recovering from fast-string exceptions, then this CPU needs to be
+ * added to the copy_mc_fragile_key set of quirks. Otherwise, absent any
+ * machine check recovery support this version should be no slower than
+ * standard memcpy.
+ */
+SYM_FUNC_START(copy_mc_generic)
+ ALTERNATIVE "jmp copy_mc_fragile", "", X86_FEATURE_ERMS
+ movq %rdi, %rax
+ movq %rdx, %rcx
+.L_copy:
+ rep movsb
+ /* Copy successful. Return zero */
+ xorl %eax, %eax
+ ret
+SYM_FUNC_END(copy_mc_generic)
+EXPORT_SYMBOL_GPL(copy_mc_generic)
+
+ .section .fixup, "ax"
+.E_copy:
+ /*
+ * On fault %rcx is updated such that the copy instruction could
+ * optionally be restarted at the fault position, i.e. it
+ * contains 'bytes remaining'. A non-zero return indicates an error
+ * to copy_mc_generic() users, or indicates a short transfer to
+ * user-copy routines.
+ */
+ movq %rcx, %rax
+ ret
+
+ .previous
+
+ _ASM_EXTABLE_FAULT(.L_copy, .E_copy)
#endif
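
[Annotation: the fixup above leans on the REP MOVSB architecture: when the
string copy faults, %rcx has been decremented for every byte already
moved, so it holds 'bytes remaining' at the fault point. A C model of the
returned value, purely illustrative; fault_offset is a hypothetical
stand-in for where the exception landed:

	/* Illustrative model only, not part of the patch */
	static unsigned long copy_mc_model(void *dst, const void *src,
					   unsigned cnt, unsigned fault_offset)
	{
		if (fault_offset >= cnt) {
			memcpy(dst, src, cnt);	/* rep movsb completed */
			return 0;
		}
		memcpy(dst, src, fault_offset);	/* partial copy up to the fault */
		return cnt - fault_offset;	/* what %rcx reports via .E_copy */
	}
]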
@@ -548,6 +548,7 @@ static const char *uaccess_safe_builtin[] = {
"__ubsan_handle_shift_out_of_bounds",
/* misc */
"csum_partial_copy_generic",
+ "copy_mc_generic",
"copy_mc_fragile",
"copy_mc_fragile_handle_tail",
"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */