@@ -78,7 +78,7 @@ do { \
struct task_struct *__next = (next); \
if (has_fpu()) \
__switch_to_fpu(__prev, __next); \
- if (has_vector()) \
+ if (has_vector(ZVE32X)) \
__switch_to_vector(__prev, __next); \
((last) = __switch_to(__prev, __next)); \
} while (0)
@@ -18,6 +18,7 @@
#include <asm/cpufeature.h>
#include <asm/csr.h>
#include <asm/asm.h>
+#include <asm/bug.h>
extern unsigned long riscv_v_vsize;
int riscv_v_setup_vsize(void);
@@ -35,10 +36,16 @@ static inline u32 riscv_v_flags(void)
return READ_ONCE(current->thread.riscv_v_flags);
}
-static __always_inline bool has_vector(void)
-{
- return riscv_has_extension_unlikely(RISCV_ISA_EXT_v);
-}
+#define has_vector(VEXT) \
+({ \
+ static_assert(RISCV_ISA_EXT_##VEXT == RISCV_ISA_EXT_ZVE32X || \
+ RISCV_ISA_EXT_##VEXT == RISCV_ISA_EXT_ZVE32F || \
+ RISCV_ISA_EXT_##VEXT == RISCV_ISA_EXT_ZVE64X || \
+ RISCV_ISA_EXT_##VEXT == RISCV_ISA_EXT_ZVE64F || \
+ RISCV_ISA_EXT_##VEXT == RISCV_ISA_EXT_ZVE64D || \
+ RISCV_ISA_EXT_##VEXT == RISCV_ISA_EXT_v); \
+ riscv_has_extension_unlikely(RISCV_ISA_EXT_##VEXT); \
+})
static inline void __riscv_v_vstate_clean(struct pt_regs *regs)
{
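
For readers unfamiliar with the pattern, a minimal standalone sketch (plain C with illustrative names, not kernel code) of how the token pasting plus static_assert above rejects unsupported extension names at compile time while still expanding to a runtime check:

#include <assert.h>
#include <stdbool.h>

enum { EXT_ZVE32X, EXT_ZVE64X, EXT_v, EXT_ZICBOM };

static bool ext_enabled(int ext)
{
	return ext == EXT_ZVE32X;	/* pretend only ZVE32X was detected at boot */
}

/* GNU C statement expression, same shape as the kernel macro above */
#define has_ext(VEXT)						\
({								\
	static_assert(EXT_##VEXT == EXT_ZVE32X ||		\
		      EXT_##VEXT == EXT_ZVE64X ||		\
		      EXT_##VEXT == EXT_v,			\
		      "not a vector extension");		\
	ext_enabled(EXT_##VEXT);				\
})

int main(void)
{
	/* has_ext(ZICBOM) would fail the build instead of probing a wrong bit */
	return has_ext(ZVE32X) ? 0 : 1;
}
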
@@ -131,7 +138,7 @@ static inline void __riscv_v_vstate_restore(struct __riscv_v_ext_state *restore_
riscv_v_enable();
asm volatile (
".option push\n\t"
- ".option arch, +v\n\t"
+ ".option arch, +zve32x\n\t"
"vsetvli %0, x0, e8, m8, ta, ma\n\t"
"vle8.v v0, (%1)\n\t"
"add %1, %1, %0\n\t"
@@ -153,7 +160,7 @@ static inline void __riscv_v_vstate_discard(void)
riscv_v_enable();
asm volatile (
".option push\n\t"
- ".option arch, +v\n\t"
+ ".option arch, +zve32x\n\t"
"vsetvli %0, x0, e8, m8, ta, ma\n\t"
"vmv.v.i v0, -1\n\t"
"vmv.v.i v8, -1\n\t"
@@ -267,7 +274,7 @@ bool riscv_v_vstate_ctrl_user_allowed(void);
struct pt_regs;
static inline int riscv_v_setup_vsize(void) { return -EOPNOTSUPP; }
-static __always_inline bool has_vector(void) { return false; }
+#define has_vector(VEXT) (false)
static inline bool riscv_v_first_use_handler(struct pt_regs *regs) { return false; }
static inline bool riscv_v_vstate_query(struct pt_regs *regs) { return false; }
static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; }
@@ -61,7 +61,7 @@ static struct xor_block_template xor_block_rvv = {
do { \
xor_speed(&xor_block_8regs); \
xor_speed(&xor_block_32regs); \
- if (has_vector()) { \
+ if (has_vector(ZVE32X)) { \
xor_speed(&xor_block_rvv);\
} \
} while (0)
@@ -705,12 +705,15 @@ void __init riscv_fill_hwcap(void)
elf_hwcap &= ~COMPAT_HWCAP_ISA_F;
}
- if (elf_hwcap & COMPAT_HWCAP_ISA_V) {
+ if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_ZVE32X)) {
/*
* This callsite can't fail here. It cannot fail when called on
* the boot hart.
*/
riscv_v_setup_vsize();
+ }
+
+ if (elf_hwcap & COMPAT_HWCAP_ISA_V) {
/*
* ISA string in device tree might have 'v' flag, but
* CONFIG_RISCV_ISA_V is disabled in kernel.
@@ -208,7 +208,7 @@ void kernel_vector_begin(void)
{
bool nested = false;
- if (WARN_ON(!has_vector()))
+ if (WARN_ON(!has_vector(ZVE32X)))
return;
BUG_ON(!may_use_simd());
@@ -236,7 +236,7 @@ EXPORT_SYMBOL_GPL(kernel_vector_begin);
*/
void kernel_vector_end(void)
{
- if (WARN_ON(!has_vector()))
+ if (WARN_ON(!has_vector(ZVE32X)))
return;
riscv_v_disable();
@@ -178,7 +178,7 @@ void flush_thread(void)
void arch_release_task_struct(struct task_struct *tsk)
{
/* Free the vector context of datap. */
- if (has_vector())
+ if (has_vector(ZVE32X))
riscv_v_thread_free(tsk);
}
@@ -225,7 +225,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
p->thread.s[0] = 0;
}
p->thread.riscv_v_flags = 0;
- if (has_vector())
+ if (has_vector(ZVE32X))
riscv_v_thread_alloc(p);
p->thread.ra = (unsigned long)ret_from_fork;
p->thread.sp = (unsigned long)childregs; /* kernel sp */
@@ -188,7 +188,7 @@ static long restore_sigcontext(struct pt_regs *regs,
return 0;
case RISCV_V_MAGIC:
- if (!has_vector() || !riscv_v_vstate_query(regs) ||
+ if (!has_vector(ZVE32X) || !riscv_v_vstate_query(regs) ||
size != riscv_v_sc_size)
return -EINVAL;
@@ -210,7 +210,7 @@ static size_t get_rt_frame_size(bool cal_all)
frame_size = sizeof(*frame);
- if (has_vector()) {
+ if (has_vector(ZVE32X)) {
if (cal_all || riscv_v_vstate_query(task_pt_regs(current)))
total_context_size += riscv_v_sc_size;
}
@@ -283,7 +283,7 @@ static long setup_sigcontext(struct rt_sigframe __user *frame,
if (has_fpu())
err |= save_fp_state(regs, &sc->sc_fpregs);
/* Save the vector state. */
- if (has_vector() && riscv_v_vstate_query(regs))
+ if (has_vector(ZVE32X) && riscv_v_vstate_query(regs))
err |= save_v_state(regs, (void __user **)&sc_ext_ptr);
/* Write zero to fp-reserved space and check it on restore_sigcontext */
err |= __put_user(0, &sc->sc_extdesc.reserved);
@@ -218,7 +218,7 @@ asmlinkage __visible void smp_callin(void)
struct mm_struct *mm = &init_mm;
unsigned int curr_cpuid = smp_processor_id();
- if (has_vector()) {
+ if (has_vector(ZVE32X)) {
/*
* Return as early as possible so the hart with a mismatching
* vlen won't boot.
@@ -69,7 +69,7 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
if (riscv_isa_extension_available(NULL, c))
pair->value |= RISCV_HWPROBE_IMA_C;
- if (has_vector())
+ if (has_vector(v))
pair->value |= RISCV_HWPROBE_IMA_V;
/*
@@ -112,7 +112,11 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
EXT_KEY(ZACAS);
EXT_KEY(ZICOND);
- if (has_vector()) {
+ /*
+ * Vector crypto and the ZVE* extensions are exposed to userspace only
+ * if the kernel has at least ZVE32X vector support.
+ */
+ if (has_vector(ZVE32X)) {
EXT_KEY(ZVE32X);
EXT_KEY(ZVE32F);
EXT_KEY(ZVE64X);
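
As a usage illustration (not part of the patch), userspace can query the newly exported keys through the riscv_hwprobe syscall. This sketch assumes uapi headers new enough to define __NR_riscv_hwprobe and RISCV_HWPROBE_EXT_ZVE32X:

#include <asm/hwprobe.h>	/* struct riscv_hwprobe, RISCV_HWPROBE_* bits */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };

	/* one key pair, all online CPUs (empty cpuset), no flags */
	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) != 0)
		return 1;

	printf("ZVE32X: %s\n", (pair.value & RISCV_HWPROBE_EXT_ZVE32X) ? "yes" : "no");
	printf("full V: %s\n", (pair.value & RISCV_HWPROBE_IMA_V) ? "yes" : "no");
	return 0;
}
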
@@ -53,7 +53,7 @@ int riscv_v_setup_vsize(void)
void __init riscv_v_setup_ctx_cache(void)
{
- if (!has_vector())
+ if (!has_vector(ZVE32X))
return;
riscv_v_user_cachep = kmem_cache_create_usercopy("riscv_vector_ctx",
@@ -173,8 +173,11 @@ bool riscv_v_first_use_handler(struct pt_regs *regs)
u32 __user *epc = (u32 __user *)regs->epc;
u32 insn = (u32)regs->badaddr;
+ if (!has_vector(ZVE32X))
+ return false;
+
/* Do not handle if V is not supported, or disabled */
- if (!(ELF_HWCAP & COMPAT_HWCAP_ISA_V))
+ if (!riscv_v_vstate_ctrl_user_allowed())
return false;
/* If V has been enabled then it is not the first-use trap */
@@ -213,7 +216,7 @@ void riscv_v_vstate_ctrl_init(struct task_struct *tsk)
bool inherit;
int cur, next;
- if (!has_vector())
+ if (!has_vector(ZVE32X))
return;
next = riscv_v_ctrl_get_next(tsk);
@@ -235,7 +238,7 @@ void riscv_v_vstate_ctrl_init(struct task_struct *tsk)
long riscv_v_vstate_ctrl_get_current(void)
{
- if (!has_vector())
+ if (!has_vector(ZVE32X))
return -EINVAL;
return current->thread.vstate_ctrl & PR_RISCV_V_VSTATE_CTRL_MASK;
@@ -246,7 +249,7 @@ long riscv_v_vstate_ctrl_set_current(unsigned long arg)
bool inherit;
int cur, next;
- if (!has_vector())
+ if (!has_vector(ZVE32X))
return -EINVAL;
if (arg & ~PR_RISCV_V_VSTATE_CTRL_MASK)
@@ -296,7 +299,7 @@ static struct ctl_table riscv_v_default_vstate_table[] = {
static int __init riscv_v_sysctl_init(void)
{
- if (has_vector())
+ if (has_vector(ZVE32X))
if (!register_sysctl("abi", riscv_v_default_vstate_table))
return -EINVAL;
return 0;
@@ -14,7 +14,7 @@
SYM_FUNC_START(__asm_copy_to_user)
#ifdef CONFIG_RISCV_ISA_V
- ALTERNATIVE("j fallback_scalar_usercopy", "nop", 0, RISCV_ISA_EXT_v, CONFIG_RISCV_ISA_V)
+ ALTERNATIVE("j fallback_scalar_usercopy", "nop", 0, RISCV_ISA_EXT_ZVE32X, CONFIG_RISCV_ISA_V)
REG_L t0, riscv_v_usercopy_threshold
bltu a2, t0, fallback_scalar_usercopy
tail enter_vector_usercopy
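
Finally, a hypothetical C-level sketch (not the kernel's assembly or ALTERNATIVE machinery) of the decision the patched usercopy entry now makes: the jump to the scalar fallback is turned into a nop once ZVE32X is detected, so only copies at or above the threshold take the vector path.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Illustrative stand-ins; the real values come from boot-time detection
 * and from riscv_v_usercopy_threshold, which the assembly loads with REG_L. */
static bool cpu_has_zve32x = true;
static size_t usercopy_threshold = 768;

static void scalar_copy(void *dst, const void *src, size_t n) { memcpy(dst, src, n); }
static void vector_copy(void *dst, const void *src, size_t n) { memcpy(dst, src, n); }

static void copy_to_user_sketch(void *dst, const void *src, size_t n)
{
	if (!cpu_has_zve32x) {		/* the ALTERNATIVE-patched jump */
		scalar_copy(dst, src, n);
		return;
	}
	if (n < usercopy_threshold) {	/* bltu a2, t0, fallback_scalar_usercopy */
		scalar_copy(dst, src, n);
		return;
	}
	vector_copy(dst, src, n);	/* tail enter_vector_usercopy */
}

int main(void)
{
	char dst[16], src[16] = "example";

	copy_to_user_sketch(dst, src, sizeof(src));
	return 0;
}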