--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -70,6 +70,13 @@ static __always_inline bool has_fpu(void) { return false; }
 #define __switch_to_fpu(__prev, __next) do { } while (0)
 #endif
 
+static inline void __switch_to_envcfg(struct task_struct *next)
+{
+	asm volatile (ALTERNATIVE("nop", "csrw " __stringify(CSR_ENVCFG) ", %0",
+				  0, RISCV_ISA_EXT_XLINUXENVCFG, 1)
+			:: "r" (next->thread_info.envcfg) : "memory");
+}
+
 extern struct task_struct *__switch_to(struct task_struct *,
 				       struct task_struct *);
 
@@ -103,6 +110,7 @@ do {						\
 	__switch_to_vector(__prev, __next);		\
 	if (switch_to_should_flush_icache(__next))	\
 		local_flush_icache_all();		\
+	__switch_to_envcfg(__next);			\
 	((last) = __switch_to(__prev, __next));		\
 } while (0)
 
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -57,6 +57,7 @@ struct thread_info {
 	long			user_sp;	/* User stack pointer */
 	int			cpu;
 	unsigned long		syscall_work;	/* SYSCALL_WORK_ flags */
+	unsigned long		envcfg;
 #ifdef CONFIG_SHADOW_CALL_STACK
 	void			*scs_base;
 	void			*scs_sp;
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -922,7 +922,7 @@ unsigned long riscv_get_elf_hwcap(void)
 void riscv_user_isa_enable(void)
 {
 	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_ZICBOZ))
-		csr_set(CSR_ENVCFG, ENVCFG_CBZE);
+		current->thread_info.envcfg |= ENVCFG_CBZE;
 	else if (any_cpu_has_zicboz)
 		pr_warn_once("Zicboz disabled as it is unavailable on some harts\n");
 }
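
Not part of the change above, but useful context for testing it: a minimal userspace sketch of what ENVCFG_CBZE ultimately permits, namely executing cbo.zero from user mode once the per-thread envcfg value (written by __switch_to_envcfg() at context switch) has the bit set. It assumes the riscv_hwprobe UAPI (__NR_riscv_hwprobe, RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZICBOZ, RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE from <asm/hwprobe.h>) and a toolchain that accepts cbo.zero (e.g. -march=rv64gc_zicboz); treat it as an illustration, not kernel code.

/* zicboz_demo.c - probe for Zicboz via hwprobe, then zero a buffer with cbo.zero. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>

int main(void)
{
	struct riscv_hwprobe pairs[] = {
		{ .key = RISCV_HWPROBE_KEY_IMA_EXT_0 },
		{ .key = RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE },
	};

	/* No cpuset/flags: report only what every online hart supports. */
	if (syscall(__NR_riscv_hwprobe, pairs, 2, 0, NULL, 0))
		return 1;

	if (!(pairs[0].value & RISCV_HWPROBE_EXT_ZICBOZ)) {
		puts("Zicboz not usable from userspace");
		return 0;
	}

	uint64_t block = pairs[1].value;
	static char buf[4096] __attribute__((aligned(4096)));
	memset(buf, 0xff, sizeof(buf));

	/* cbo.zero clears the whole cache block containing the given address. */
	for (char *p = buf; p < buf + sizeof(buf); p += block)
		asm volatile("cbo.zero (%0)" :: "r" (p) : "memory");

	printf("zeroed %zu bytes in %llu-byte blocks\n", sizeof(buf),
	       (unsigned long long)block);
	return 0;
}

Probing through hwprobe rather than /proc/cpuinfo matters here because, as the pr_warn_once() above shows, the kernel only enables cbo.zero for user mode when Zicboz is available on all harts.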