[v5,5/6] riscv: lib: vectorize copy_to_user/copy_from_user

Message ID 20231214155721.1753-6-andy.chiu@sifive.com (mailing list archive)
State Superseded
Series riscv: support kernel-mode Vector

Checks

Context Check Description
conchuod/vmtest-for-next-PR fail PR summary
conchuod/patch-5-test-1 success .github/scripts/patches/build_rv32_defconfig.sh
conchuod/patch-5-test-2 fail .github/scripts/patches/build_rv64_clang_allmodconfig.sh
conchuod/patch-5-test-3 fail .github/scripts/patches/build_rv64_gcc_allmodconfig.sh
conchuod/patch-5-test-4 fail .github/scripts/patches/build_rv64_nommu_k210_defconfig.sh
conchuod/patch-5-test-5 fail .github/scripts/patches/build_rv64_nommu_virt_defconfig.sh
conchuod/patch-5-test-6 warning .github/scripts/patches/checkpatch.sh
conchuod/patch-5-test-7 success .github/scripts/patches/dtb_warn_rv64.sh
conchuod/patch-5-test-8 success .github/scripts/patches/header_inline.sh
conchuod/patch-5-test-9 success .github/scripts/patches/kdoc.sh
conchuod/patch-5-test-10 success .github/scripts/patches/module_param.sh
conchuod/patch-5-test-11 success .github/scripts/patches/verify_fixes.sh
conchuod/patch-5-test-12 success .github/scripts/patches/verify_signedoff.sh

Commit Message

Andy Chiu Dec. 14, 2023, 3:57 p.m. UTC
This patch utilizes Vector to perform copy_to_user/copy_from_user. If
Vector is available and the size of the copy is large enough for Vector
to perform better than scalar code, then the kernel uses Vector to do
the userspace copy. Though the best programming practice for users is
to reduce copies, this provides a faster variant when copies are
inevitable.

The threshold above which Vector is used, riscv_v_usercopy_thres, is
only a heuristic for now. We can add DT parsing if people feel the need
to customize it.

The exception fixup code of __asm_vector_usercopy must fall back to the
scalar version because accessing user pages might fault and must
therefore be sleepable. The current kernel-mode Vector does not allow
tasks to be preemptible, so we must deactivate Vector and perform the
scalar fallback in such cases.

The original implementation of Vector operations comes from
https://github.com/sifive/sifive-libc, which we have agreed to
contribute to the Linux kernel.

Signed-off-by: Andy Chiu <andy.chiu@sifive.com>
---
Changelog v4:
 - new patch since v4
---
 arch/riscv/lib/Makefile          |  2 ++
 arch/riscv/lib/riscv_v_helpers.c | 38 ++++++++++++++++++++++
 arch/riscv/lib/uaccess.S         | 11 +++++++
 arch/riscv/lib/uaccess_vector.S  | 55 ++++++++++++++++++++++++++++++++
 4 files changed, 106 insertions(+)
 create mode 100644 arch/riscv/lib/riscv_v_helpers.c
 create mode 100644 arch/riscv/lib/uaccess_vector.S

Comments

Charlie Jenkins Dec. 15, 2023, 6:25 a.m. UTC | #1
On Thu, Dec 14, 2023 at 03:57:20PM +0000, Andy Chiu wrote:
> This patch utilizes Vector to perform copy_to_user/copy_from_user. If
> Vector is available and the size of the copy is large enough for Vector
> to perform better than scalar code, then the kernel uses Vector to do
> the userspace copy. Though the best programming practice for users is
> to reduce copies, this provides a faster variant when copies are
> inevitable.
> 
> The threshold above which Vector is used, riscv_v_usercopy_thres, is
> only a heuristic for now. We can add DT parsing if people feel the need
> to customize it.
> 
> The exception fixup code of __asm_vector_usercopy must fall back to the
> scalar version because accessing user pages might fault and must
> therefore be sleepable. The current kernel-mode Vector does not allow
> tasks to be preemptible, so we must deactivate Vector and perform the
> scalar fallback in such cases.
> 
> The original implementation of Vector operations comes from
> https://github.com/sifive/sifive-libc, which we have agreed to
> contribute to the Linux kernel.
> 
> Signed-off-by: Andy Chiu <andy.chiu@sifive.com>
> ---
> Changelog v4:
>  - new patch since v4
> ---
>  arch/riscv/lib/Makefile          |  2 ++
>  arch/riscv/lib/riscv_v_helpers.c | 38 ++++++++++++++++++++++
>  arch/riscv/lib/uaccess.S         | 11 +++++++
>  arch/riscv/lib/uaccess_vector.S  | 55 ++++++++++++++++++++++++++++++++
>  4 files changed, 106 insertions(+)
>  create mode 100644 arch/riscv/lib/riscv_v_helpers.c
>  create mode 100644 arch/riscv/lib/uaccess_vector.S
> 
> diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
> index 494f9cd1a00c..1fe8d797e0f2 100644
> --- a/arch/riscv/lib/Makefile
> +++ b/arch/riscv/lib/Makefile
> @@ -12,3 +12,5 @@ lib-$(CONFIG_RISCV_ISA_ZICBOZ)	+= clear_page.o
>  
>  obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
>  lib-$(CONFIG_RISCV_ISA_V)	+= xor.o
> +lib-$(CONFIG_RISCV_ISA_V)	+= riscv_v_helpers.o
> +lib-$(CONFIG_RISCV_ISA_V)	+= uaccess_vector.o
> diff --git a/arch/riscv/lib/riscv_v_helpers.c b/arch/riscv/lib/riscv_v_helpers.c
> new file mode 100644
> index 000000000000..d763b9c69fb7
> --- /dev/null
> +++ b/arch/riscv/lib/riscv_v_helpers.c
> @@ -0,0 +1,38 @@
> +// SPDX-License-Identifier: GPL-2.0-or-later
> +/*
> + * Copyright (C) 2023 SiFive
> + * Author: Andy Chiu <andy.chiu@sifive.com>
> + */
> +#include <linux/linkage.h>
> +#include <asm/asm.h>
> +
> +#include <asm/vector.h>
> +#include <asm/simd.h>
> +
> +size_t riscv_v_usercopy_thres = 768;
> +int __asm_vector_usercopy(void *dst, void *src, size_t n);
> +int fallback_scalar_usercopy(void *dst, void *src, size_t n);
> +asmlinkage int enter_vector_usercopy(void *dst, void *src, size_t n)
> +{
> +	size_t remain, copied;
> +
> +	/* skip has_vector() check because it has been done by the asm  */
> +	if (!may_use_simd())
> +		goto fallback;
> +
> +	kernel_vector_begin();
> +	remain = __asm_vector_usercopy(dst, src, n);
> +	kernel_vector_end();
> +
> +	if (remain) {
> +		copied = n - remain;
> +		dst += copied;
> +		src += copied;
> +		goto fallback;
> +	}
> +
> +	return remain;
> +
> +fallback:
> +	return fallback_scalar_usercopy(dst, src, n);
> +}
> diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
> index 3ab438f30d13..ae8c1453cfcf 100644
> --- a/arch/riscv/lib/uaccess.S
> +++ b/arch/riscv/lib/uaccess.S
> @@ -3,6 +3,8 @@
>  #include <asm/asm.h>
>  #include <asm/asm-extable.h>
>  #include <asm/csr.h>
> +#include <asm/hwcap.h>
> +#include <asm/alternative-macros.h>
>  
>  	.macro fixup op reg addr lbl
>  100:
> @@ -11,6 +13,14 @@
>  	.endm
>  
>  SYM_FUNC_START(__asm_copy_to_user)
> +#ifdef CONFIG_RISCV_ISA_V
> +	ALTERNATIVE("j fallback_scalar_usercopy", "nop", 0, RISCV_ISA_EXT_v, CONFIG_RISCV_ISA_V)

has_vector uses riscv_has_extension_unlikely, but this is the equivalent
of riscv_has_extension_likely. It seems like this should be consistent
across all call sites. Since has_vector uses the unlikely version, this
should probably be rearranged so that the nop is in the non-vector
version and the jump is for the vector version.
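
For illustration only, a rearranged entry point might look roughly like
the untested sketch below (the .Lvector_usercopy label is made up here,
and it assumes the alternative patching fixes up the offset of the
patched-in jump):

SYM_FUNC_START(__asm_copy_to_user)
#ifdef CONFIG_RISCV_ISA_V
	/* default: nop, fall through to scalar; patched to a jump when V is present */
	ALTERNATIVE("nop", "j .Lvector_usercopy", 0, RISCV_ISA_EXT_v, CONFIG_RISCV_ISA_V)
#endif
SYM_FUNC_START(fallback_scalar_usercopy)
	/* scalar copy body, unchanged from the patch ... */

#ifdef CONFIG_RISCV_ISA_V
.Lvector_usercopy:
	la	t0, riscv_v_usercopy_thres
	REG_L	t0, (t0)
	bltu	a2, t0, fallback_scalar_usercopy
	tail	enter_vector_usercopy
#endif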

A neat optimization you can do here is replace the "nop" with the
instruction that will be executed first. With how it's written right now
you could replace the nop with the la instruction. It's just a nop so
the performance difference is probably not going to be noticeable but
it's theoretically better without the nop. The downside of doing this is
that it seems like alternatives do not work with macros so you couldn't
replace the nop with a REG_L instruction, unless there is some trick to
make it work.

> +	la	t0, riscv_v_usercopy_thres
> +	REG_L	t0, (t0)

The assembler does something really silly here, it seems. With both
binutils 2.41 and clang 18, the following is generated:

6:   00000297                auipc   t0,0x0
a:   00028293                mv      t0,t0
e:   0002b283                ld      t0,0(t0) # 6 <__asm_copy_from_user+0x4>

However, this la is not needed. You can replace the la + REG_L with just
a REG_L as follows:

REG_L   t0, riscv_v_usercopy_thres

This then generates the following code:

6:   00000297                auipc   t0,0x0
a:   0002b283                ld      t0,0(t0) # 6 <__asm_copy_from_user+0x4>

> +	bltu	a2, t0, fallback_scalar_usercopy
> +	tail enter_vector_usercopy
> +#endif
> +SYM_FUNC_START(fallback_scalar_usercopy)
>  
>  	/* Enable access to user memory */
>  	li t6, SR_SUM
> @@ -181,6 +191,7 @@ SYM_FUNC_START(__asm_copy_to_user)
>  	sub a0, t5, a0
>  	ret
>  SYM_FUNC_END(__asm_copy_to_user)
> +SYM_FUNC_END(fallback_scalar_usercopy)
>  EXPORT_SYMBOL(__asm_copy_to_user)
>  SYM_FUNC_ALIAS(__asm_copy_from_user, __asm_copy_to_user)
>  EXPORT_SYMBOL(__asm_copy_from_user)
> diff --git a/arch/riscv/lib/uaccess_vector.S b/arch/riscv/lib/uaccess_vector.S
> new file mode 100644
> index 000000000000..5bebcb1276a2
> --- /dev/null
> +++ b/arch/riscv/lib/uaccess_vector.S
> @@ -0,0 +1,55 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +
> +#include <linux/linkage.h>
> +#include <asm-generic/export.h>
> +#include <asm/asm.h>
> +#include <asm/asm-extable.h>
> +#include <asm/csr.h>
> +
> +#define pDst a0
> +#define pSrc a1
> +#define iNum a2
> +
> +#define iVL a3
> +#define pDstPtr a4
> +
> +#define ELEM_LMUL_SETTING m8
> +#define vData v0
> +
> +	.macro fixup op reg addr lbl
> +100:
> +	\op \reg, \addr
> +	_asm_extable	100b, \lbl
> +	.endm
> +
> +SYM_FUNC_START(__asm_vector_usercopy)
> +	/* Enable access to user memory */
> +	li t6, SR_SUM
> +	csrs CSR_STATUS, t6
> +
> +	/* Save for return value */
> +	mv	t5, a2

What's the point of this?

> +
> +	mv pDstPtr, pDst

Why do this move? pDst isn't used anywhere else so you can safely
continue to use pDst everywhere that pDstPtr is used.

- Charlie

> +loop:
> +	vsetvli iVL, iNum, e8, ELEM_LMUL_SETTING, ta, ma
> +	fixup vle8.v vData, (pSrc), 10f
> +	fixup vse8.v vData, (pDstPtr), 10f
> +	sub iNum, iNum, iVL
> +	add pSrc, pSrc, iVL
> +	add pDstPtr, pDstPtr, iVL
> +	bnez iNum, loop
> +
> +.Lout_copy_user:
> +	/* Disable access to user memory */
> +	csrc CSR_STATUS, t6
> +	li	a0, 0
> +	ret
> +
> +	/* Exception fixup code */
> +10:
> +	/* Disable access to user memory */
> +	csrc	CSR_STATUS, t6
> +	mv	a0, iNum
> +	ret
> +SYM_FUNC_END(__asm_vector_usercopy)
> -- 
> 2.17.1
>
Andrew Jones Dec. 15, 2023, 1:52 p.m. UTC | #2
On Thu, Dec 14, 2023 at 10:25:49PM -0800, Charlie Jenkins wrote:
> On Thu, Dec 14, 2023 at 03:57:20PM +0000, Andy Chiu wrote:
...
> >  SYM_FUNC_START(__asm_copy_to_user)
> > +#ifdef CONFIG_RISCV_ISA_V
> > +	ALTERNATIVE("j fallback_scalar_usercopy", "nop", 0, RISCV_ISA_EXT_v, CONFIG_RISCV_ISA_V)
> 
> has_vector uses riscv_has_extension_unlikely, but this is the equivalent
> of riscv_has_extension_likely. It seems like this should be consistent
> across all call sites. Since has_vector uses the unlikely version, this
> should probably be rearranged so that the nop is in the non-vector
> version and the jump is for the vector version.

I think I prefer it the way it is, where the optimized path is fully
optimized and the fallback path also suffers the jump. (I've also
taken that approach for clear_page()). Also, as extensions are adopted
by more and more platforms, and we start to consider switching unlikelys
to likelys, then it would be easy to miss stuff like this.

> 
> A neat optimization you can do here is replace the "nop" with the
> instruction that will be executed first. With how it's written right now
> you could replace the nop with the la instruction. It's just a nop so
> the performance difference is probably not going to be noticeable but
> it's theoretically better without the nop. The downside of doing this is

I think I prefer the nop, because it's easier to read and maintain the 
assembly function when the ALTERNATIVE doesn't do anything other than
choose the entry point.

> that it seems like alternatives do not work with macros so you couldn't
> replace the nop with a REG_L instruction, unless there is some trick to
> make it work.

One should be able to use REG_L in an alternative since macro expansion
will result in the string "ld" or "lw", which can then be concatenated
with its parameters, e.g.

  ALTERNATIVE(REG_L " a1, 0(a2)", "nop", 0, 0, 0)

(But note the space before the a1. Without it, we'd get "lda1,")

Thanks,
drew
Andy Chiu Dec. 19, 2023, 9:58 a.m. UTC | #3
On Fri, Dec 15, 2023 at 2:25 PM Charlie Jenkins <charlie@rivosinc.com> wrote:
> On Thu, Dec 14, 2023 at 03:57:20PM +0000, Andy Chiu wrote:
> > +     la      t0, riscv_v_usercopy_thres
> > +     REG_L   t0, (t0)
>
> The assembler does something really silly here it seems. With both
> binutils 2.41 and clang 18 the following is generated:
>
> 6:   00000297                auipc   t0,0x0
> a:   00028293                mv      t0,t0
> e:   0002b283                ld      t0,0(t0) # 6 <__asm_copy_from_user+0x4>
>
> However, this la is not needed. You can replace the la + REG_L with just
> a REG_L as follows:
>
> REG_L   t0, riscv_v_usercopy_thres
>
> This then generates the following code:
>
> 6:   00000297                auipc   t0,0x0
> a:   0002b283                ld      t0,0(t0) # 6 <__asm_copy_from_user+0x4>
>

Thanks, this will be fixed in v5

> > +     bltu    a2, t0, fallback_scalar_usercopy
> > +     tail enter_vector_usercopy
> > +#endif
> > +SYM_FUNC_START(fallback_scalar_usercopy)
> >
> >       /* Enable access to user memory */
> >       li t6, SR_SUM
> > @@ -181,6 +191,7 @@ SYM_FUNC_START(__asm_copy_to_user)
> >       sub a0, t5, a0
> >       ret
> >  SYM_FUNC_END(__asm_copy_to_user)
> > +SYM_FUNC_END(fallback_scalar_usercopy)
> >  EXPORT_SYMBOL(__asm_copy_to_user)
> >  SYM_FUNC_ALIAS(__asm_copy_from_user, __asm_copy_to_user)
> >  EXPORT_SYMBOL(__asm_copy_from_user)
> > diff --git a/arch/riscv/lib/uaccess_vector.S b/arch/riscv/lib/uaccess_vector.S
> > new file mode 100644
> > index 000000000000..5bebcb1276a2
> > --- /dev/null
> > +++ b/arch/riscv/lib/uaccess_vector.S
> > @@ -0,0 +1,55 @@
> > +/* SPDX-License-Identifier: GPL-2.0-only */
> > +
> > +#include <linux/linkage.h>
> > +#include <asm-generic/export.h>
> > +#include <asm/asm.h>
> > +#include <asm/asm-extable.h>
> > +#include <asm/csr.h>
> > +
> > +#define pDst a0
> > +#define pSrc a1
> > +#define iNum a2
> > +
> > +#define iVL a3
> > +#define pDstPtr a4
> > +
> > +#define ELEM_LMUL_SETTING m8
> > +#define vData v0
> > +
> > +     .macro fixup op reg addr lbl
> > +100:
> > +     \op \reg, \addr
> > +     _asm_extable    100b, \lbl
> > +     .endm
> > +
> > +SYM_FUNC_START(__asm_vector_usercopy)
> > +     /* Enable access to user memory */
> > +     li t6, SR_SUM
> > +     csrs CSR_STATUS, t6
> > +
> > +     /* Save for return value */
> > +     mv      t5, a2
>
> What's the point of this?

Oops, I will remove it

>
> > +
> > +     mv pDstPtr, pDst
>
> Why do this move? pDst isn't used anywhere else so you can safely
> continue to use pDst everywhere that pDstPtr is used.

Yes, it makes more sense to remove pDstPtr and use just pDst.
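
Roughly, the loop would then become the untested sketch below (reusing
the register #defines and the fixup macro already in the file, and
advancing pDst/a0 directly; the remaining byte count is still returned
from iNum, so clobbering a0 in the loop is fine):

loop:
	vsetvli iVL, iNum, e8, ELEM_LMUL_SETTING, ta, ma
	fixup vle8.v vData, (pSrc), 10f
	fixup vse8.v vData, (pDst), 10f
	sub iNum, iNum, iVL
	add pSrc, pSrc, iVL
	add pDst, pDst, iVL
	bnez iNum, loop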

Thanks,
Andy
Andy Chiu Dec. 19, 2023, 2:43 p.m. UTC | #4
On Fri, Dec 15, 2023 at 9:52 PM Andrew Jones <ajones@ventanamicro.com> wrote:
>
> On Thu, Dec 14, 2023 at 10:25:49PM -0800, Charlie Jenkins wrote:
> > On Thu, Dec 14, 2023 at 03:57:20PM +0000, Andy Chiu wrote:
> ...
> > >  SYM_FUNC_START(__asm_copy_to_user)
> > > +#ifdef CONFIG_RISCV_ISA_V
> > > +   ALTERNATIVE("j fallback_scalar_usercopy", "nop", 0, RISCV_ISA_EXT_v, CONFIG_RISCV_ISA_V)
> >
> > has_vector uses riscv_has_extension_unlikely, but this is the equivalent
> > of riscv_has_extension_likely. It seems like this should be consistent
> > across all call sites. Since has_vector uses the unlikely version, this
> > should probably be rearranged so that the nop is in the non-vector
> > version and the jump is for the vector version.
>
> I think I prefer it the way it is, where the optimized path is fully
> optimized and the fallback path also suffers the jump. (I've also
> taken that approach for clear_page()). Also, as extensions are adopted
> by more and more platforms, and we start to consider switching unlikelys
> to likelys, then it would be easy to miss stuff like this.
>
> >
> > A neat optimization you can do here is replace the "nop" with the
> > instruction that will be executed first. With how it's written right now
> > you could replace the nop with the la instruction. It's just a nop so
> > the performance difference is probably not going to be noticeable but
> > it's theoretically better without the nop. The downside of doing this is
>
> I think I prefer the nop, because it's easier to read and maintain the
> assembly function when the ALTERNATIVE doesn't do anything other than
> choose the entry point.

Good point. I would prefer this approach as well. Loading from a
symbol can take 2 instructions, so we will have to insert nop padding
for the default path. Though the nop will never execute, it will make
the assembly code a bit harder to read. Maybe we could leave it for a
future optimization.
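
For reference, the padded variant would be roughly the untested sketch
below; it assumes the asm ALTERNATIVE macro accepts a multi-instruction
string for the default arm and that the patching code relocates the la
in the new content correctly:

	ALTERNATIVE("j fallback_scalar_usercopy; nop",
		    "la t0, riscv_v_usercopy_thres",
		    0, RISCV_ISA_EXT_v, CONFIG_RISCV_ISA_V)
	REG_L	t0, (t0)
	bltu	a2, t0, fallback_scalar_usercopy
	tail	enter_vector_usercopy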

>
> > that it seems like alternatives do not work with macros so you couldn't
> > replace the nop with a REG_L instruction, unless there is some trick to
> > make it work.
>
> One should be able to use REG_L in an alternative since macro expansion
> will result in the string "ld" or "lw", which can then be concatenated
> with its parameters, e.g.
>
>   ALTERNATIVE(REG_L " a1, 0(a2)", "nop", 0, 0, 0)
>
> (But note the space before the a1. Without it, we'd get "lda1,")
>

Umm, perhaps I am using an older toolchain. It reports:

arch/riscv/lib/uaccess.S:17: Error: too many positional arguments

on binutils 2.38

> Thanks,
> drew

Thanks,
Andy

Patch

diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
index 494f9cd1a00c..1fe8d797e0f2 100644
--- a/arch/riscv/lib/Makefile
+++ b/arch/riscv/lib/Makefile
@@ -12,3 +12,5 @@  lib-$(CONFIG_RISCV_ISA_ZICBOZ)	+= clear_page.o
 
 obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
 lib-$(CONFIG_RISCV_ISA_V)	+= xor.o
+lib-$(CONFIG_RISCV_ISA_V)	+= riscv_v_helpers.o
+lib-$(CONFIG_RISCV_ISA_V)	+= uaccess_vector.o
diff --git a/arch/riscv/lib/riscv_v_helpers.c b/arch/riscv/lib/riscv_v_helpers.c
new file mode 100644
index 000000000000..d763b9c69fb7
--- /dev/null
+++ b/arch/riscv/lib/riscv_v_helpers.c
@@ -0,0 +1,38 @@ 
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2023 SiFive
+ * Author: Andy Chiu <andy.chiu@sifive.com>
+ */
+#include <linux/linkage.h>
+#include <asm/asm.h>
+
+#include <asm/vector.h>
+#include <asm/simd.h>
+
+size_t riscv_v_usercopy_thres = 768;
+int __asm_vector_usercopy(void *dst, void *src, size_t n);
+int fallback_scalar_usercopy(void *dst, void *src, size_t n);
+asmlinkage int enter_vector_usercopy(void *dst, void *src, size_t n)
+{
+	size_t remain, copied;
+
+	/* skip has_vector() check because it has been done by the asm  */
+	if (!may_use_simd())
+		goto fallback;
+
+	kernel_vector_begin();
+	remain = __asm_vector_usercopy(dst, src, n);
+	kernel_vector_end();
+
+	if (remain) {
+		copied = n - remain;
+		dst += copied;
+		src += copied;
+		goto fallback;
+	}
+
+	return remain;
+
+fallback:
+	return fallback_scalar_usercopy(dst, src, n);
+}
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
index 3ab438f30d13..ae8c1453cfcf 100644
--- a/arch/riscv/lib/uaccess.S
+++ b/arch/riscv/lib/uaccess.S
@@ -3,6 +3,8 @@ 
 #include <asm/asm.h>
 #include <asm/asm-extable.h>
 #include <asm/csr.h>
+#include <asm/hwcap.h>
+#include <asm/alternative-macros.h>
 
 	.macro fixup op reg addr lbl
 100:
@@ -11,6 +13,14 @@ 
 	.endm
 
 SYM_FUNC_START(__asm_copy_to_user)
+#ifdef CONFIG_RISCV_ISA_V
+	ALTERNATIVE("j fallback_scalar_usercopy", "nop", 0, RISCV_ISA_EXT_v, CONFIG_RISCV_ISA_V)
+	la	t0, riscv_v_usercopy_thres
+	REG_L	t0, (t0)
+	bltu	a2, t0, fallback_scalar_usercopy
+	tail enter_vector_usercopy
+#endif
+SYM_FUNC_START(fallback_scalar_usercopy)
 
 	/* Enable access to user memory */
 	li t6, SR_SUM
@@ -181,6 +191,7 @@  SYM_FUNC_START(__asm_copy_to_user)
 	sub a0, t5, a0
 	ret
 SYM_FUNC_END(__asm_copy_to_user)
+SYM_FUNC_END(fallback_scalar_usercopy)
 EXPORT_SYMBOL(__asm_copy_to_user)
 SYM_FUNC_ALIAS(__asm_copy_from_user, __asm_copy_to_user)
 EXPORT_SYMBOL(__asm_copy_from_user)
diff --git a/arch/riscv/lib/uaccess_vector.S b/arch/riscv/lib/uaccess_vector.S
new file mode 100644
index 000000000000..5bebcb1276a2
--- /dev/null
+++ b/arch/riscv/lib/uaccess_vector.S
@@ -0,0 +1,55 @@ 
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <linux/linkage.h>
+#include <asm-generic/export.h>
+#include <asm/asm.h>
+#include <asm/asm-extable.h>
+#include <asm/csr.h>
+
+#define pDst a0
+#define pSrc a1
+#define iNum a2
+
+#define iVL a3
+#define pDstPtr a4
+
+#define ELEM_LMUL_SETTING m8
+#define vData v0
+
+	.macro fixup op reg addr lbl
+100:
+	\op \reg, \addr
+	_asm_extable	100b, \lbl
+	.endm
+
+SYM_FUNC_START(__asm_vector_usercopy)
+	/* Enable access to user memory */
+	li t6, SR_SUM
+	csrs CSR_STATUS, t6
+
+	/* Save for return value */
+	mv	t5, a2
+
+	mv pDstPtr, pDst
+loop:
+	vsetvli iVL, iNum, e8, ELEM_LMUL_SETTING, ta, ma
+	fixup vle8.v vData, (pSrc), 10f
+	fixup vse8.v vData, (pDstPtr), 10f
+	sub iNum, iNum, iVL
+	add pSrc, pSrc, iVL
+	add pDstPtr, pDstPtr, iVL
+	bnez iNum, loop
+
+.Lout_copy_user:
+	/* Disable access to user memory */
+	csrc CSR_STATUS, t6
+	li	a0, 0
+	ret
+
+	/* Exception fixup code */
+10:
+	/* Disable access to user memory */
+	csrc	CSR_STATUS, t6
+	mv	a0, iNum
+	ret
+SYM_FUNC_END(__asm_vector_usercopy)