Message ID | 20230715150032.6917-6-andy.chiu@sifive.com (mailing list archive) |
---|---|
State | Superseded |
Series | riscv: support kernel-mode Vector |
Context | Check | Description |
---|---|---|
conchuod/cover_letter | success | Series has a cover letter |
conchuod/tree_selection | success | Guessed tree name to be for-next at HEAD 471aba2e4760 |
conchuod/fixes_present | success | Fixes tag not required for -next series |
conchuod/maintainers_pattern | success | MAINTAINERS pattern errors before the patch: 4 and now 4 |
conchuod/verify_signedoff | success | Signed-off-by tag matches author and committer |
conchuod/kdoc | success | Errors and warnings before: 0 this patch: 0 |
conchuod/build_rv64_clang_allmodconfig | fail | Failed to build the tree with this patch. |
conchuod/module_param | success | Was 0 now: 0 |
conchuod/build_rv64_gcc_allmodconfig | fail | Errors and warnings before: 15784 this patch: 15877 |
conchuod/build_rv32_defconfig | fail | Build failed |
conchuod/dtb_warn_rv64 | success | Errors and warnings before: 3 this patch: 3 |
conchuod/header_inline | success | No static functions without inline keyword in header files |
conchuod/checkpatch | warning | CHECK: Prefer using the BIT macro |
conchuod/build_rv64_nommu_k210_defconfig | success | Build OK |
conchuod/verify_fixes | success | No Fixes tag |
conchuod/build_rv64_nommu_virt_defconfig | success | Build OK |
On Sat, Jul 15, 2023 at 03:00:31PM +0000, Andy Chiu wrote:
> Add kernel_vstate to keep track of kernel-mode Vector registers when
> trap introduced context switch happens. Also, provide trap_pt_regs to
> let context save/restore routine reference status.VS at which the trap
> takes place. The thread flag TIF_RISCV_V_KMV indicates whether a task is
> running in kernel-mode Vector with preemption 'ON'. So context switch
> routines know and would save V-regs to kernel_vstate and restore V-regs
> immediately from kernel_vstate if the bit is set.
>
> Apart from a task's preemption status, the capability of
> running preemptive kernel-mode Vector is jointly controlled by the
> RISCV_V_VSTATE_CTRL_KMV_PREEMPTIBLE mask in the task's
> thread.vstate_ctrl. This bit is masked whenever a trap takes place in
> kernel mode while executing preemptive Vector code.
>
> Signed-off-by: Andy Chiu <andy.chiu@sifive.com>
> ---
> arch/riscv/include/asm/processor.h | 2 +
> arch/riscv/include/asm/thread_info.h | 4 ++
> arch/riscv/include/asm/vector.h | 27 ++++++++++--
> arch/riscv/kernel/asm-offsets.c | 2 +
> arch/riscv/kernel/entry.S | 41 ++++++++++++++++++
> arch/riscv/kernel/kernel_mode_vector.c | 57 ++++++++++++++++++++++++--
> arch/riscv/kernel/process.c | 8 +++-
> arch/riscv/kernel/vector.c | 3 +-
> 8 files changed, 136 insertions(+), 8 deletions(-)
>
> diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
> index e82af1097e26..d337b750f2ec 100644
> --- a/arch/riscv/include/asm/processor.h
> +++ b/arch/riscv/include/asm/processor.h
> @@ -42,6 +42,8 @@ struct thread_struct {
> unsigned long bad_cause;
> unsigned long vstate_ctrl;
> struct __riscv_v_ext_state vstate;
> + struct pt_regs *trap_pt_regs;
> + struct __riscv_v_ext_state kernel_vstate;
> };
>
> /* Whitelist the fstate from the task_struct for hardened usercopy */
> diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
> index d83975efe866..59d88adfc4de 100644
> --- a/arch/riscv/include/asm/thread_info.h
> +++ b/arch/riscv/include/asm/thread_info.h
> @@ -102,6 +102,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
> #define TIF_UPROBE 10 /* uprobe breakpoint or singlestep */
> #define TIF_32BIT 11 /* compat-mode 32bit process */
> #define TIF_RISCV_V_DEFER_RESTORE 12
> +#define TIF_RISCV_V_KMV 13

Same comment about comments.

Also, the "V" here is a dupe, since you have RISCV_V in the name.
Ditto everywhere else, afaict. Perhaps you could do s/KMV/KERNEL_MODE/?

> #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
> #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
> @@ -109,9 +110,12 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
> #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
> #define _TIF_UPROBE (1 << TIF_UPROBE)
> #define _TIF_RISCV_V_DEFER_RESTORE (1 << TIF_RISCV_V_DEFER_RESTORE)
> +#define _TIF_RISCV_V_KMV (1 << TIF_RISCV_V_KMV_TASK)

Where is KMV_TASK defined?

>
> #define _TIF_WORK_MASK \
> (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
> _TIF_NOTIFY_SIGNAL | _TIF_UPROBE)
>
> +#define RISCV_V_VSTATE_CTRL_KMV_PREEMPTIBLE 0x20
> +
> #endif /* _ASM_RISCV_THREAD_INFO_H */
> diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
> index 50c556afd95a..d004c9fa6a57 100644
> --- a/arch/riscv/include/asm/vector.h
> +++ b/arch/riscv/include/asm/vector.h
> @@ -25,6 +25,12 @@ bool riscv_v_first_use_handler(struct pt_regs *regs);
> int kernel_rvv_begin(void);
> void kernel_rvv_end(void);
>
> +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE_KMV
> +void riscv_v_vstate_ctrl_config_kmv(bool preemptive_kmv);
> +#else
> +#define riscv_v_vstate_ctrl_config_kmv(preemptive_kmv) do {} while (0)
> +#endif

For clang/llvm allmodconfig:
../arch/riscv/kernel/process.c:213:2: error: call to undeclared function 'riscv_v_vstate_ctrl_config_kmv'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]

Probably also happens when vector is disabled?

> +
> static __always_inline bool has_vector(void)
> {
> return riscv_has_extension_unlikely(RISCV_ISA_EXT_v);
> @@ -195,9 +201,24 @@ static inline void __switch_to_vector(struct task_struct *prev,
> {
> struct pt_regs *regs;
>
> - regs = task_pt_regs(prev);
> - riscv_v_vstate_save(prev->thread.vstate, regs);
> - riscv_v_vstate_set_restore(next, task_pt_regs(next));
> + if (IS_ENABLED(CONFIG_RISCV_ISA_V_PREEMPTIVE_KMV) &&

w.r.t. this symbol, just drop the KMV?

> + test_tsk_thread_flag(prev, TIF_RISCV_V_KMV)) {
> + regs = prev->thread.trap_pt_regs;
> + WARN_ON(!regs);
> + riscv_v_vstate_save(&prev->thread.kernel_vstate, regs);
> + } else {
> + regs = task_pt_regs(prev);
> + riscv_v_vstate_save(&prev->thread.vstate, regs);
> + }
> +
> + if (IS_ENABLED(CONFIG_RISCV_ISA_V_PREEMPTIVE_KMV) &&

Possibly stupid question, but not explained by the patch, why would we
ever want to have RISCV_ISA_V_PREEMPTIVE_KMV disabled?

>
> + test_tsk_thread_flag(next, TIF_RISCV_V_KMV)) {
> + regs = next->thread.trap_pt_regs;
> + WARN_ON(!regs);
> + riscv_v_vstate_restore(&next->thread.kernel_vstate, regs);
> + } else {
> + riscv_v_vstate_set_restore(next, task_pt_regs(next));
> + }
> }
>
> void riscv_v_vstate_ctrl_init(struct task_struct *tsk);
> diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
> index d6a75aac1d27..4b062f7741b2 100644
> --- a/arch/riscv/kernel/asm-offsets.c
> +++ b/arch/riscv/kernel/asm-offsets.c
> @@ -38,6 +38,8 @@ void asm_offsets(void)
> OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
> OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
> OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
> + OFFSET(TASK_THREAD_TRAP_REGP, task_struct, thread.trap_pt_regs);
> + OFFSET(TASK_THREAD_VSTATE_CTRL, task_struct, thread.vstate_ctrl);
>
> OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
> OFFSET(TASK_THREAD_F1, task_struct, thread.fstate.f[1]);
> diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
> index 143a2bb3e697..42b80b90626a 100644
> --- a/arch/riscv/kernel/entry.S
> +++ b/arch/riscv/kernel/entry.S
> @@ -66,6 +66,27 @@ _save_context:
> REG_S s4, PT_CAUSE(sp)
> REG_S s5, PT_TP(sp)
>
> + /*
> + * Reocrd the register set at the frame where in-kernel V registers are

nit: s/Reocrd/Record/

> diff --git a/arch/riscv/kernel/kernel_mode_vector.c b/arch/riscv/kernel/kernel_mode_vector.c
> index 30f1b861cac0..bcd6a69a5266 100644
> --- a/arch/riscv/kernel/kernel_mode_vector.c
> +++ b/arch/riscv/kernel/kernel_mode_vector.c
> @@ -10,6 +10,7 @@
> #include <linux/percpu.h>
> #include <linux/preempt.h>
> #include <linux/types.h>
> +#include <linux/slab.h>
>
> #include <asm/vector.h>
> #include <asm/switch_to.h>
> @@ -35,7 +36,8 @@ static __must_check inline bool may_use_vector(void)
> * where it is set.
> */
> return !in_irq() && !irqs_disabled() && !in_nmi() &&
> - !this_cpu_read(vector_context_busy);
> + !this_cpu_read(vector_context_busy) &&
> + !test_thread_flag(TIF_RISCV_V_KMV);
> }
>
> /*
> @@ -69,6 +71,47 @@ static void put_cpu_vector_context(void)
> preempt_enable();
> }
>
> +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE_KMV
> +void riscv_v_vstate_ctrl_config_kmv(bool preemptive_kmv)

I don't understand what this function is trying to do, based on the
function name. The lack of a verb in it is somewhat confusing.

> +{
> + if (preemptive_kmv)
> + current->thread.vstate_ctrl |= RISCV_V_VSTATE_CTRL_KMV_PREEMPTIBLE;
> + else
> + current->thread.vstate_ctrl &= ~RISCV_V_VSTATE_CTRL_KMV_PREEMPTIBLE;
> +}
> +
> +static bool riscv_v_kmv_preempitble(void)

Beyond the ible/able stuff, there's a typo in this function name.

> +{
> + return !!(current->thread.vstate_ctrl & RISCV_V_VSTATE_CTRL_KMV_PREEMPTIBLE);
> +}

Little comment on the rest, not qualified to do so :)

Thanks,
Conor.
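For illustration only (not part of the posted patch or of either email): with the s/KMV/KERNEL_MODE/ rename suggested above, the stray TIF_RISCV_V_KMV_TASK reference corrected, and the checkpatch "Prefer using the BIT macro" note applied, the thread_info.h additions could read roughly like this.

/* Sketch, not the posted code: renamed flag plus the vstate_ctrl gate. */
#define TIF_RISCV_V_KERNEL_MODE 13 /* kernel-mode Vector run with preemption on */

#define _TIF_RISCV_V_KERNEL_MODE (1 << TIF_RISCV_V_KERNEL_MODE)

/* Per-task gate for preemptible kernel-mode Vector, kept in thread.vstate_ctrl */
#define RISCV_V_VSTATE_CTRL_KMV_PREEMPTIBLE BIT(5) /* same value as the literal 0x20 */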
On Mon, Jul 17, 2023 at 7:06 PM Conor Dooley <conor.dooley@microchip.com> wrote:
>
> On Sat, Jul 15, 2023 at 03:00:31PM +0000, Andy Chiu wrote:
> > Add kernel_vstate to keep track of kernel-mode Vector registers when
> > trap introduced context switch happens. Also, provide trap_pt_regs to
> > let context save/restore routine reference status.VS at which the trap
> > takes place. The thread flag TIF_RISCV_V_KMV indicates whether a task is
> > running in kernel-mode Vector with preemption 'ON'. So context switch
> > routines know and would save V-regs to kernel_vstate and restore V-regs
> > immediately from kernel_vstate if the bit is set.
> >
> > Apart from a task's preemption status, the capability of
> > running preemptive kernel-mode Vector is jointly controlled by the
> > RISCV_V_VSTATE_CTRL_KMV_PREEMPTIBLE mask in the task's
> > thread.vstate_ctrl. This bit is masked whenever a trap takes place in
> > kernel mode while executing preemptive Vector code.
> >
> > Signed-off-by: Andy Chiu <andy.chiu@sifive.com>
> > ---
> > arch/riscv/include/asm/processor.h | 2 +
> > arch/riscv/include/asm/thread_info.h | 4 ++
> > arch/riscv/include/asm/vector.h | 27 ++++++++++--
> > arch/riscv/kernel/asm-offsets.c | 2 +
> > arch/riscv/kernel/entry.S | 41 ++++++++++++++++++
> > arch/riscv/kernel/kernel_mode_vector.c | 57 ++++++++++++++++++++++++--
> > arch/riscv/kernel/process.c | 8 +++-
> > arch/riscv/kernel/vector.c | 3 +-
> > 8 files changed, 136 insertions(+), 8 deletions(-)
> >
> > diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
> > index e82af1097e26..d337b750f2ec 100644
> > --- a/arch/riscv/include/asm/processor.h
> > +++ b/arch/riscv/include/asm/processor.h
> > @@ -42,6 +42,8 @@ struct thread_struct {
> > unsigned long bad_cause;
> > unsigned long vstate_ctrl;
> > struct __riscv_v_ext_state vstate;
> > + struct pt_regs *trap_pt_regs;
> > + struct __riscv_v_ext_state kernel_vstate;
> > };
> >
> > /* Whitelist the fstate from the task_struct for hardened usercopy */
> > diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
> > index d83975efe866..59d88adfc4de 100644
> > --- a/arch/riscv/include/asm/thread_info.h
> > +++ b/arch/riscv/include/asm/thread_info.h
> > @@ -102,6 +102,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
> > #define TIF_UPROBE 10 /* uprobe breakpoint or singlestep */
> > #define TIF_32BIT 11 /* compat-mode 32bit process */
> > #define TIF_RISCV_V_DEFER_RESTORE 12
> > +#define TIF_RISCV_V_KMV 13
>
> Same comment about comments.

Adding /* kernel-mode Vector run with preemption-on */

>
> Also, the "V" here is a dupe, since you have RISCV_V in the name.
> Ditto everywhere else, afaict. Perhaps you could do s/KMV/KERNEL_MODE/?

Good idea.

>
> > #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
> > #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
> > @@ -109,9 +110,12 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
> > #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
> > #define _TIF_UPROBE (1 << TIF_UPROBE)
> > #define _TIF_RISCV_V_DEFER_RESTORE (1 << TIF_RISCV_V_DEFER_RESTORE)
> > +#define _TIF_RISCV_V_KMV (1 << TIF_RISCV_V_KMV_TASK)
>
> Where is KMV_TASK defined?

My bad, it should be TIF_RISCV_V_KMV. Also, I'm changing it to
TIF_RISCV_V_KERNEL_MODE now.

>
> >
> > #define _TIF_WORK_MASK \
> > (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
> > _TIF_NOTIFY_SIGNAL | _TIF_UPROBE)
> >
> > +#define RISCV_V_VSTATE_CTRL_KMV_PREEMPTIBLE 0x20
> > +
> > #endif /* _ASM_RISCV_THREAD_INFO_H */
> > diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
> > index 50c556afd95a..d004c9fa6a57 100644
> > --- a/arch/riscv/include/asm/vector.h
> > +++ b/arch/riscv/include/asm/vector.h
> > @@ -25,6 +25,12 @@ bool riscv_v_first_use_handler(struct pt_regs *regs);
> > int kernel_rvv_begin(void);
> > void kernel_rvv_end(void);
> >
> > +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE_KMV
> > +void riscv_v_vstate_ctrl_config_kmv(bool preemptive_kmv);
> > +#else
> > +#define riscv_v_vstate_ctrl_config_kmv(preemptive_kmv) do {} while (0)
> > +#endif
>
> For clang/llvm allmodconfig:
> ../arch/riscv/kernel/process.c:213:2: error: call to undeclared function 'riscv_v_vstate_ctrl_config_kmv'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
>
> Probably also happens when vector is disabled?

Yes, I'm going to move the entire block out of CONFIG_RISCV_ISA_V to
resolve that.

>
>
> > +
> > static __always_inline bool has_vector(void)
> > {
> > return riscv_has_extension_unlikely(RISCV_ISA_EXT_v);
> > @@ -195,9 +201,24 @@ static inline void __switch_to_vector(struct task_struct *prev,
> > {
> > struct pt_regs *regs;
> >
> > - regs = task_pt_regs(prev);
> > - riscv_v_vstate_save(prev->thread.vstate, regs);
> > - riscv_v_vstate_set_restore(next, task_pt_regs(next));
> > + if (IS_ENABLED(CONFIG_RISCV_ISA_V_PREEMPTIVE_KMV) &&
>
> w.r.t. this symbol, just drop the KMV?
>
> > + test_tsk_thread_flag(prev, TIF_RISCV_V_KMV)) {
> > + regs = prev->thread.trap_pt_regs;
> > + WARN_ON(!regs);
> > + riscv_v_vstate_save(&prev->thread.kernel_vstate, regs);
> > + } else {
> > + regs = task_pt_regs(prev);
> > + riscv_v_vstate_save(&prev->thread.vstate, regs);
> > + }
> > +
> > + if (IS_ENABLED(CONFIG_RISCV_ISA_V_PREEMPTIVE_KMV) &&
>
> Possibly stupid question, but not explained by the patch, why would we
> ever want to have RISCV_ISA_V_PREEMPTIVE_KMV disabled?

Sorry, it's not obvious here. Below is the commit message that I will
add for describing usecase of RISCV_ISA_V_PREEMPTIVE_KMV (now
RISCV_ISA_V_PREEMPTIVE):

provide a config CONFIG_RISCV_ISA_V_PREEMPTIVE to give users an option
to disable preemptible kernel-mode Vector at build time. Users with
constraint memory may want to disable this config as preemptible
kernel-mode Vector needs extra space for tracking per thread's
kernel-mode V context. Or, users might as well want to disable it if
all kernel-mode Vector code is time sensitive and cannot tolerate
context switch overhead.

>
> > + test_tsk_thread_flag(next, TIF_RISCV_V_KMV)) {
> > + regs = next->thread.trap_pt_regs;
> > + WARN_ON(!regs);
> > + riscv_v_vstate_restore(&next->thread.kernel_vstate, regs);
> > + } else {
> > + riscv_v_vstate_set_restore(next, task_pt_regs(next));
> > + }
> > }
> >
> > void riscv_v_vstate_ctrl_init(struct task_struct *tsk);
> > diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
> > index d6a75aac1d27..4b062f7741b2 100644
> > --- a/arch/riscv/kernel/asm-offsets.c
> > +++ b/arch/riscv/kernel/asm-offsets.c
> > @@ -38,6 +38,8 @@ void asm_offsets(void)
> > OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
> > OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
> > OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
> > + OFFSET(TASK_THREAD_TRAP_REGP, task_struct, thread.trap_pt_regs);
> > + OFFSET(TASK_THREAD_VSTATE_CTRL, task_struct, thread.vstate_ctrl);
> >
> > OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
> > OFFSET(TASK_THREAD_F1, task_struct, thread.fstate.f[1]);
> > diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
> > index 143a2bb3e697..42b80b90626a 100644
> > --- a/arch/riscv/kernel/entry.S
> > +++ b/arch/riscv/kernel/entry.S
> > @@ -66,6 +66,27 @@ _save_context:
> > REG_S s4, PT_CAUSE(sp)
> > REG_S s5, PT_TP(sp)
> >
> > + /*
> > + * Reocrd the register set at the frame where in-kernel V registers are
>
> nit: s/Reocrd/Record/

Oops.

>
> > diff --git a/arch/riscv/kernel/kernel_mode_vector.c b/arch/riscv/kernel/kernel_mode_vector.c
> > index 30f1b861cac0..bcd6a69a5266 100644
> > --- a/arch/riscv/kernel/kernel_mode_vector.c
> > +++ b/arch/riscv/kernel/kernel_mode_vector.c
> > @@ -10,6 +10,7 @@
> > #include <linux/percpu.h>
> > #include <linux/preempt.h>
> > #include <linux/types.h>
> > +#include <linux/slab.h>
> >
> > #include <asm/vector.h>
> > #include <asm/switch_to.h>
> > @@ -35,7 +36,8 @@ static __must_check inline bool may_use_vector(void)
> > * where it is set.
> > */
> > return !in_irq() && !irqs_disabled() && !in_nmi() &&
> > - !this_cpu_read(vector_context_busy);
> > + !this_cpu_read(vector_context_busy) &&
> > + !test_thread_flag(TIF_RISCV_V_KMV);
> > }
> >
> > /*
> > @@ -69,6 +71,47 @@ static void put_cpu_vector_context(void)
> > preempt_enable();
> > }
> >
> > +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE_KMV
> > +void riscv_v_vstate_ctrl_config_kmv(bool preemptive_kmv)
>
> I don't understand what this function is trying to do, based on the
> function name. The lack of a verb in it is somewhat confusing.

The purpose of this function is to allow/disallow kernel-mode Vector to
be executed with kernel preemption. I am going to change the function
name to kernel_vector_allow_preemption() since there is only one user
of this function and the only purpose is to initialize it to be
"allowed" when the config is y.

>
> > +{
> > + if (preemptive_kmv)
> > + current->thread.vstate_ctrl |= RISCV_V_VSTATE_CTRL_KMV_PREEMPTIBLE;
> > + else
> > + current->thread.vstate_ctrl &= ~RISCV_V_VSTATE_CTRL_KMV_PREEMPTIBLE;
> > +}
> > +
> > +static bool riscv_v_kmv_preempitble(void)
>
> Beyond the ible/able stuff, there's a typo in this function name.

I am going to change the function name to kernel_vector_preemptible to
match the naming scheme above.

>
> > +{
> > + return !!(current->thread.vstate_ctrl & RISCV_V_VSTATE_CTRL_KMV_PREEMPTIBLE);
> > +}
>
> Little comment on the rest, not qualified to do so :)
>
> Thanks,
> Conor.

Thanks,
Andy
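To make the renames in the reply above concrete, the two helpers could end up looking roughly as follows; the bodies mirror the functions posted in this patch, only the names come from Andy's reply, and later revisions of the series may differ.

/* Allow or forbid preemptible kernel-mode Vector for the current task. */
static void kernel_vector_allow_preemption(bool allowed)
{
	if (allowed)
		current->thread.vstate_ctrl |= RISCV_V_VSTATE_CTRL_KMV_PREEMPTIBLE;
	else
		current->thread.vstate_ctrl &= ~RISCV_V_VSTATE_CTRL_KMV_PREEMPTIBLE;
}

/* Does the current task allow preemptible kernel-mode Vector right now? */
static bool kernel_vector_preemptible(void)
{
	return !!(current->thread.vstate_ctrl & RISCV_V_VSTATE_CTRL_KMV_PREEMPTIBLE);
}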
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index e82af1097e26..d337b750f2ec 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -42,6 +42,8 @@ struct thread_struct {
 unsigned long bad_cause;
 unsigned long vstate_ctrl;
 struct __riscv_v_ext_state vstate;
+ struct pt_regs *trap_pt_regs;
+ struct __riscv_v_ext_state kernel_vstate;
 };

 /* Whitelist the fstate from the task_struct for hardened usercopy */
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index d83975efe866..59d88adfc4de 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -102,6 +102,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 #define TIF_UPROBE 10 /* uprobe breakpoint or singlestep */
 #define TIF_32BIT 11 /* compat-mode 32bit process */
 #define TIF_RISCV_V_DEFER_RESTORE 12
+#define TIF_RISCV_V_KMV 13

 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -109,9 +110,12 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
 #define _TIF_UPROBE (1 << TIF_UPROBE)
 #define _TIF_RISCV_V_DEFER_RESTORE (1 << TIF_RISCV_V_DEFER_RESTORE)
+#define _TIF_RISCV_V_KMV (1 << TIF_RISCV_V_KMV_TASK)

 #define _TIF_WORK_MASK \
 (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
 _TIF_NOTIFY_SIGNAL | _TIF_UPROBE)

+#define RISCV_V_VSTATE_CTRL_KMV_PREEMPTIBLE 0x20
+
 #endif /* _ASM_RISCV_THREAD_INFO_H */
diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
index 50c556afd95a..d004c9fa6a57 100644
--- a/arch/riscv/include/asm/vector.h
+++ b/arch/riscv/include/asm/vector.h
@@ -25,6 +25,12 @@ bool riscv_v_first_use_handler(struct pt_regs *regs);
 int kernel_rvv_begin(void);
 void kernel_rvv_end(void);

+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE_KMV
+void riscv_v_vstate_ctrl_config_kmv(bool preemptive_kmv);
+#else
+#define riscv_v_vstate_ctrl_config_kmv(preemptive_kmv) do {} while (0)
+#endif
+
 static __always_inline bool has_vector(void)
 {
 return riscv_has_extension_unlikely(RISCV_ISA_EXT_v);
@@ -195,9 +201,24 @@ static inline void __switch_to_vector(struct task_struct *prev,
 {
 struct pt_regs *regs;

- regs = task_pt_regs(prev);
- riscv_v_vstate_save(prev->thread.vstate, regs);
- riscv_v_vstate_set_restore(next, task_pt_regs(next));
+ if (IS_ENABLED(CONFIG_RISCV_ISA_V_PREEMPTIVE_KMV) &&
+ test_tsk_thread_flag(prev, TIF_RISCV_V_KMV)) {
+ regs = prev->thread.trap_pt_regs;
+ WARN_ON(!regs);
+ riscv_v_vstate_save(&prev->thread.kernel_vstate, regs);
+ } else {
+ regs = task_pt_regs(prev);
+ riscv_v_vstate_save(&prev->thread.vstate, regs);
+ }
+
+ if (IS_ENABLED(CONFIG_RISCV_ISA_V_PREEMPTIVE_KMV) &&
+ test_tsk_thread_flag(next, TIF_RISCV_V_KMV)) {
+ regs = next->thread.trap_pt_regs;
+ WARN_ON(!regs);
+ riscv_v_vstate_restore(&next->thread.kernel_vstate, regs);
+ } else {
+ riscv_v_vstate_set_restore(next, task_pt_regs(next));
+ }
 }

 void riscv_v_vstate_ctrl_init(struct task_struct *tsk);
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index d6a75aac1d27..4b062f7741b2 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -38,6 +38,8 @@ void asm_offsets(void)
 OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
 OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
 OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
+ OFFSET(TASK_THREAD_TRAP_REGP, task_struct, thread.trap_pt_regs);
+ OFFSET(TASK_THREAD_VSTATE_CTRL, task_struct, thread.vstate_ctrl);

 OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
 OFFSET(TASK_THREAD_F1, task_struct, thread.fstate.f[1]);
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 143a2bb3e697..42b80b90626a 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -66,6 +66,27 @@ _save_context:
 REG_S s4, PT_CAUSE(sp)
 REG_S s5, PT_TP(sp)

+ /*
+ * Reocrd the register set at the frame where in-kernel V registers are
+ * last alive.
+ */
+ REG_L s0, TASK_TI_FLAGS(tp)
+ li s1, 1 << TIF_RISCV_V_KMV
+ and s0, s0, s1
+ beqz s0, 1f
+ li s0, TASK_THREAD_TRAP_REGP
+ add s0, s0, tp
+ REG_L s1, (s0)
+ bnez s1, 1f
+ REG_S sp, (s0)
+ li s0, TASK_THREAD_VSTATE_CTRL
+ add s0, s0, tp
+ REG_L s1, (s0)
+ li s2, ~RISCV_V_VSTATE_CTRL_KMV_PREEMPTIBLE
+ and s1, s1, s2
+ REG_S s1, (s0)
+1:
+
 /*
 * Set the scratch register to 0, so that if a recursive exception
 * occurs, the exception vector knows it came from the kernel
@@ -129,6 +150,26 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
 */
 csrw CSR_SCRATCH, tp
1:
+ /*
+ * Clear tracking of the trap registers when we return to the frame
+ * that uses kernel mode Vector.
+ */
+ REG_L s0, TASK_TI_FLAGS(tp)
+ li s1, 1 << TIF_RISCV_V_KMV
+ and s0, s0, s1
+ beqz s0, 1f
+ li s0, TASK_THREAD_TRAP_REGP
+ add s0, s0, tp
+ REG_L s1, (s0)
+ bne s1, sp, 1f
+ REG_S x0, (s0)
+ li s0, TASK_THREAD_VSTATE_CTRL
+ add s0, s0, tp
+ REG_L s1, (s0)
+ ori s1, s1, RISCV_V_VSTATE_CTRL_KMV_PREEMPTIBLE
+ REG_S s1, (s0)
+1:
+
 REG_L a0, PT_STATUS(sp)
 /*
 * The current load reservation is effectively part of the processor's
diff --git a/arch/riscv/kernel/kernel_mode_vector.c b/arch/riscv/kernel/kernel_mode_vector.c
index 30f1b861cac0..bcd6a69a5266 100644
--- a/arch/riscv/kernel/kernel_mode_vector.c
+++ b/arch/riscv/kernel/kernel_mode_vector.c
@@ -10,6 +10,7 @@
 #include <linux/percpu.h>
 #include <linux/preempt.h>
 #include <linux/types.h>
+#include <linux/slab.h>

 #include <asm/vector.h>
 #include <asm/switch_to.h>
@@ -35,7 +36,8 @@ static __must_check inline bool may_use_vector(void)
 * where it is set.
 */
 return !in_irq() && !irqs_disabled() && !in_nmi() &&
- !this_cpu_read(vector_context_busy);
+ !this_cpu_read(vector_context_busy) &&
+ !test_thread_flag(TIF_RISCV_V_KMV);
 }

 /*
@@ -69,6 +71,47 @@ static void put_cpu_vector_context(void)
 preempt_enable();
 }

+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE_KMV
+void riscv_v_vstate_ctrl_config_kmv(bool preemptive_kmv)
+{
+ if (preemptive_kmv)
+ current->thread.vstate_ctrl |= RISCV_V_VSTATE_CTRL_KMV_PREEMPTIBLE;
+ else
+ current->thread.vstate_ctrl &= ~RISCV_V_VSTATE_CTRL_KMV_PREEMPTIBLE;
+}
+
+static bool riscv_v_kmv_preempitble(void)
+{
+ return !!(current->thread.vstate_ctrl & RISCV_V_VSTATE_CTRL_KMV_PREEMPTIBLE);
+}
+
+static int riscv_v_start_kernel_context(void)
+{
+ struct __riscv_v_ext_state *vstate;
+
+ vstate = &current->thread.kernel_vstate;
+ if (!vstate->datap) {
+ vstate->datap = kmalloc(riscv_v_vsize, GFP_KERNEL);
+ if (!vstate->datap)
+ return -ENOMEM;
+ }
+
+ current->thread.trap_pt_regs = NULL;
+ WARN_ON(test_and_set_thread_flag(TIF_RISCV_V_KMV));
+ return 0;
+}
+
+static void riscv_v_stop_kernel_context(void)
+{
+ WARN_ON(!test_and_clear_thread_flag(TIF_RISCV_V_KMV));
+ current->thread.trap_pt_regs = NULL;
+}
+#else
+#define riscv_v_kmv_preempitble() (false)
+#define riscv_v_start_kernel_context() (0)
+#define riscv_v_stop_kernel_context() do {} while (0)
+#endif /* CONFIG_RISCV_ISA_V_PREEMPTIVE_KMV */
+
 /*
 * kernel_rvv_begin(): obtain the CPU vector registers for use by the calling
 * context
@@ -94,7 +137,12 @@ int kernel_rvv_begin(void)
 riscv_v_vstate_save(&current->thread.vstate, task_pt_regs(current));

 /* Acquire kernel mode vector */
- get_cpu_vector_context();
+ if (!preemptible() || !riscv_v_kmv_preempitble()) {
+ get_cpu_vector_context();
+ } else {
+ if (riscv_v_start_kernel_context())
+ get_cpu_vector_context();
+ }

 /* Enable vector */
 riscv_v_enable();
@@ -124,6 +172,9 @@ void kernel_rvv_end(void)
 riscv_v_disable();

 /* release kernel mode vector */
- put_cpu_vector_context();
+ if (!test_thread_flag(TIF_RISCV_V_KMV))
+ put_cpu_vector_context();
+ else
+ riscv_v_stop_kernel_context();
 }
 EXPORT_SYMBOL_GPL(kernel_rvv_end);
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index ec89e7edb6fd..4db8cbc8abe9 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -160,8 +160,11 @@ void flush_thread(void)
 void arch_release_task_struct(struct task_struct *tsk)
 {
 /* Free the vector context of datap. */
- if (has_vector())
+ if (has_vector()) {
 kfree(tsk->thread.vstate.datap);
+ if (IS_ENABLED(CONFIG_RISCV_ISA_V_PREEMPTIVE_KMV))
+ kfree(tsk->thread.kernel_vstate.datap);
+ }
 }

 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
@@ -170,7 +173,9 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 *dst = *src;
 /* clear entire V context, including datap for a new task */
 memset(&dst->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
+ memset(&dst->thread.kernel_vstate, 0, sizeof(struct __riscv_v_ext_state));
 clear_tsk_thread_flag(dst, TIF_RISCV_V_DEFER_RESTORE);
+ clear_tsk_thread_flag(dst, TIF_RISCV_V_KMV);

 return 0;
 }
@@ -205,6 +210,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 childregs->a0 = 0; /* Return value of fork() */
 p->thread.s[0] = 0;
 }
+ riscv_v_vstate_ctrl_config_kmv(true);
 p->thread.ra = (unsigned long)ret_from_fork;
 p->thread.sp = (unsigned long)childregs; /* kernel sp */
 return 0;
diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c
index 9d583b760db4..42f227077ee5 100644
--- a/arch/riscv/kernel/vector.c
+++ b/arch/riscv/kernel/vector.c
@@ -122,7 +122,8 @@ static inline void riscv_v_ctrl_set(struct task_struct *tsk, int cur, int nxt,
 ctrl |= VSTATE_CTRL_MAKE_NEXT(nxt);
 if (inherit)
 ctrl |= PR_RISCV_V_VSTATE_CTRL_INHERIT;
- tsk->thread.vstate_ctrl = ctrl;
+ tsk->thread.vstate_ctrl &= ~PR_RISCV_V_VSTATE_CTRL_MASK;
+ tsk->thread.vstate_ctrl |= ctrl;
 }

 bool riscv_v_vstate_ctrl_user_allowed(void)
Add kernel_vstate to keep track of kernel-mode Vector registers when
trap introduced context switch happens. Also, provide trap_pt_regs to
let context save/restore routine reference status.VS at which the trap
takes place. The thread flag TIF_RISCV_V_KMV indicates whether a task is
running in kernel-mode Vector with preemption 'ON'. So context switch
routines know and would save V-regs to kernel_vstate and restore V-regs
immediately from kernel_vstate if the bit is set.

Apart from a task's preemption status, the capability of
running preemptive kernel-mode Vector is jointly controlled by the
RISCV_V_VSTATE_CTRL_KMV_PREEMPTIBLE mask in the task's
thread.vstate_ctrl. This bit is masked whenever a trap takes place in
kernel mode while executing preemptive Vector code.

Signed-off-by: Andy Chiu <andy.chiu@sifive.com>
---
 arch/riscv/include/asm/processor.h | 2 +
 arch/riscv/include/asm/thread_info.h | 4 ++
 arch/riscv/include/asm/vector.h | 27 ++++++++++--
 arch/riscv/kernel/asm-offsets.c | 2 +
 arch/riscv/kernel/entry.S | 41 ++++++++++++++++++
 arch/riscv/kernel/kernel_mode_vector.c | 57 ++++++++++++++++++++++++--
 arch/riscv/kernel/process.c | 8 +++-
 arch/riscv/kernel/vector.c | 3 +-
 8 files changed, 136 insertions(+), 8 deletions(-)
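As context for how this API is meant to be consumed, a hypothetical in-kernel caller would bracket its Vector code with kernel_rvv_begin()/kernel_rvv_end(). Everything about the caller below is invented for illustration; only the begin/end calls and their return types come from this series, and the exact error handling is an assumption.

#include <linux/types.h>
#include <asm/vector.h>

/* Hypothetical user of kernel-mode Vector; not part of this series. */
static int vector_copy_example(void *dst, const void *src, size_t len)
{
	int ret;

	/* Save the user V state and claim the Vector unit for kernel use. */
	ret = kernel_rvv_begin();
	if (ret)
		return ret;	/* e.g. fall back to a scalar implementation */

	/* ... Vector-accelerated work on dst/src would run here ... */

	/* Release the kernel V context; the user V state is restored later. */
	kernel_rvv_end();
	return 0;
}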