@@ -46,7 +46,8 @@ extern void fpsimd_restore_current_state(void);
extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
- void *sve_state, unsigned int sve_vl);
+ void *sve_state, unsigned int sve_vl,
+ u64 *svcr);
extern void fpsimd_flush_task_state(struct task_struct *target);
extern void fpsimd_save_and_flush_cpu_state(void);
@@ -169,6 +169,7 @@ struct thread_struct {
u64 mte_ctrl;
#endif
u64 sctlr_user;
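+	/* Saved SVCR_EL0: SME streaming mode (SM) and ZA enable bits */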
+ u64 svcr;
u64 tpidr2_el0;
};
@@ -82,6 +82,7 @@ int arch_dup_task_struct(struct task_struct *dst,
#define TIF_SVE_VL_INHERIT 24 /* Inherit SVE vl_onexec across exec */
#define TIF_SSBD 25 /* Wants SSB mitigation */
#define TIF_TAGGED_ADDR 26 /* Allow tagged user addresses */
+#define TIF_SME 27 /* SME in use */
#define TIF_SME_VL_INHERIT 28 /* Inherit SME vl_onexec across exec */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -121,6 +121,7 @@
struct fpsimd_last_state_struct {
struct user_fpsimd_state *st;
void *sve_state;
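+	/* Where SVCR_EL0 is saved when this state is switched out */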
+ u64 *svcr;
unsigned int sve_vl;
};
@@ -359,6 +360,9 @@ static void task_fpsimd_load(void)
WARN_ON(!system_supports_fpsimd());
WARN_ON(!have_cpu_fpsimd_context());
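+	/*
+	 * Restore SVCR before any vector state: toggling SVCR.SM
+	 * zeroes the SVE vector registers, so it must be written
+	 * before the SVE/FPSIMD registers are loaded.
+	 */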
+ if (IS_ENABLED(CONFIG_ARM64_SME) && test_thread_flag(TIF_SME))
+		write_sysreg_s(current->thread.svcr, SYS_SVCR_EL0);
+
if (IS_ENABLED(CONFIG_ARM64_SVE) && test_thread_flag(TIF_SVE)) {
sve_set_vq(sve_vq_from_vl(task_get_sve_vl(current)) - 1);
		sve_load_state(sve_pffr(&current->thread),
@@ -390,6 +394,12 @@ static void fpsimd_save(void)
if (test_thread_flag(TIF_FOREIGN_FPSTATE))
return;
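+	/* Write the live SVCR_EL0 value back into the bound context */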
+ if (IS_ENABLED(CONFIG_ARM64_SME) &&
+ test_thread_flag(TIF_SME)) {
+		u64 *svcr = last->svcr;
+
+		*svcr = read_sysreg_s(SYS_SVCR_EL0);
+ }
+
if (IS_ENABLED(CONFIG_ARM64_SVE) &&
test_thread_flag(TIF_SVE)) {
if (WARN_ON(sve_get_vl() != last->sve_vl)) {
@@ -741,6 +751,10 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
sve_to_fpsimd(task);
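+	/*
+	 * Changing the SME vector length discards any streaming SVE
+	 * and ZA state, so exit streaming mode and disable ZA.
+	 */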
+ if (system_supports_sme() && type == ARM64_VEC_SME)
+ task->thread.svcr &= ~(SYS_SVCR_EL0_SM_MASK |
+ SYS_SVCR_EL0_ZA_MASK);
+
if (task == current)
put_cpu_fpsimd_context();
@@ -1404,6 +1418,7 @@ static void fpsimd_bind_task_to_cpu(void)
	last->st = &current->thread.uw.fpsimd_state;
last->sve_state = current->thread.sve_state;
last->sve_vl = task_get_sve_vl(current);
+	last->svcr = &current->thread.svcr;
current->thread.fpsimd_cpu = smp_processor_id();
if (system_supports_sve()) {
@@ -1418,7 +1433,7 @@ static void fpsimd_bind_task_to_cpu(void)
}
void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
- unsigned int sve_vl)
+ unsigned int sve_vl, u64 *svcr)
{
struct fpsimd_last_state_struct *last =
this_cpu_ptr(&fpsimd_last_state);
@@ -1427,6 +1442,7 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
WARN_ON(!in_softirq() && !irqs_disabled());
last->st = st;
+ last->svcr = svcr;
last->sve_state = sve_state;
last->sve_vl = sve_vl;
}
@@ -310,6 +310,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
dst->thread.sve_state = NULL;
clear_tsk_thread_flag(dst, TIF_SVE);
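+	/* The new thread starts with streaming mode and ZA disabled */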
+ dst->thread.svcr = 0;
+
/* clear any pending asynchronous tag fault raised by the parent */
clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);
@@ -109,9 +109,14 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
WARN_ON_ONCE(!irqs_disabled());
if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
+ /*
+		 * Currently we do not support SME guests, so SVCR is
+ * always 0 and we just need a variable to point to.
+ */
fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.fp_regs,
vcpu->arch.sve_state,
- vcpu->arch.sve_max_vl);
+ vcpu->arch.sve_max_vl,
+					 &vcpu->arch.svcr);
clear_thread_flag(TIF_FOREIGN_FPSTATE);
update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu));