
[v2,1/2] arm64/sme: Restore SMCR on exit from suspend

Message ID 20240203-arm64-sme-resume-v2-1-a1fbaddc4425@kernel.org (mailing list archive)
State New, archived
Series arm64/sme: Fix handling of traps on resume

Commit Message

Mark Brown Feb. 3, 2024, 1 p.m. UTC
The fields in SMCR_EL1 reset to an architecturally UNKNOWN value. Since we
do not otherwise manage the traps configured in this register at runtime, we
need to reconfigure them after a suspend in case nothing else was kind
enough to preserve them for us.

The vector length will be restored as part of restoring the SME state for
the next SME-using task.

Fixes: a1f4ccd25cc2 ("arm64/sme: Provide Kconfig for SME")
Reported-by: Jackson Cooper-Driver <Jackson.Cooper-Driver@arm.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
---
 arch/arm64/include/asm/fpsimd.h |  2 ++
 arch/arm64/kernel/fpsimd.c      | 14 ++++++++++++++
 arch/arm64/kernel/suspend.c     |  3 +++
 3 files changed, 19 insertions(+)
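
To make the failure mode concrete, here is a hedged sketch of what "the
traps configured in this register" means in practice. It is illustrative
only: smcr_fa64_lost() is a hypothetical helper, while SMCR_ELx_FA64,
SYS_SMCR_EL1, read_sysreg_s() and system_supports_fa64() are the kernel's
real definitions.

/*
 * Illustrative only: after resume, SMCR_EL1 holds an architecturally
 * UNKNOWN value.  If SMCR_ELx_FA64 happens to come back clear on an
 * FA64-capable system, the full FPSIMD/SVE instruction set traps in
 * streaming SVE mode even though the kernel advertised the feature.
 */
static bool smcr_fa64_lost(void)
{
	u64 smcr = read_sysreg_s(SYS_SMCR_EL1);	/* UNKNOWN after resume */

	return system_supports_fa64() && !(smcr & SMCR_ELx_FA64);
}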

Comments

Will Deacon Feb. 9, 2024, 4:46 p.m. UTC | #1
On Sat, Feb 03, 2024 at 01:00:40PM +0000, Mark Brown wrote:
> The fields in SMCR_EL1 reset to an architecturally UNKNOWN value. Since we
> do not otherwise manage the traps configured in this register at runtime, we
> need to reconfigure them after a suspend in case nothing else was kind
> enough to preserve them for us.
> 
> The vector length will be restored as part of restoring the SME state for
> the next SME-using task.
> 
> Fixes: a1f4ccd25cc2 ("arm64/sme: Provide Kconfig for SME")
> Reported-by: Jackson Cooper-Driver <Jackson.Cooper-Driver@arm.com>
> Signed-off-by: Mark Brown <broonie@kernel.org>
> ---
>  arch/arm64/include/asm/fpsimd.h |  2 ++
>  arch/arm64/kernel/fpsimd.c      | 14 ++++++++++++++
>  arch/arm64/kernel/suspend.c     |  3 +++
>  3 files changed, 19 insertions(+)
> 
> diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
> index 50e5f25d3024..7780d343ef08 100644
> --- a/arch/arm64/include/asm/fpsimd.h
> +++ b/arch/arm64/include/asm/fpsimd.h
> @@ -386,6 +386,7 @@ extern void sme_alloc(struct task_struct *task, bool flush);
>  extern unsigned int sme_get_vl(void);
>  extern int sme_set_current_vl(unsigned long arg);
>  extern int sme_get_current_vl(void);
> +extern void sme_suspend_exit(void);
>  
>  /*
>   * Return how many bytes of memory are required to store the full SME
> @@ -421,6 +422,7 @@ static inline int sme_max_vl(void) { return 0; }
>  static inline int sme_max_virtualisable_vl(void) { return 0; }
>  static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; }
>  static inline int sme_get_current_vl(void) { return -EINVAL; }
> +static inline void sme_suspend_exit(void) { }
>  
>  static inline size_t sme_state_size(struct task_struct const *task)
>  {
> diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
> index a5dc6f764195..8d2a5824d5d3 100644
> --- a/arch/arm64/kernel/fpsimd.c
> +++ b/arch/arm64/kernel/fpsimd.c
> @@ -1311,6 +1311,20 @@ void __init sme_setup(void)
>  		get_sme_default_vl());
>  }
>  
> +void sme_suspend_exit(void)
> +{
> +	u64 smcr = 0;
> +
> +	if (!system_supports_sme())
> +		return;
> +
> +	if (system_supports_fa64())
> +		smcr |= SMCR_ELx_FA64;
> +
> +	write_sysreg_s(smcr, SYS_SMCR_EL1);
> +	write_sysreg_s(0, SYS_SMPRI_EL1);
> +}

Looking at the other places where we touch SMCR_EL1, it looks like we
always use a read-modify-write sequence. However, doesn't that mean we
inherit a bunch of unknown bits on cold boot? I'm basically wondering
whether we should be initialising these registers to a well-known value
earlier in the CPU init path, a bit like we do for the EL2 variants.

Will
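
To make the concern concrete, here is a minimal sketch of the
read-modify-write pattern Will describes (illustrative only, not the
kernel's exact code; smcr_set_vl_rmw() and its vq_minus_one parameter are
hypothetical, while the register accessors and masks are the kernel's):

/*
 * Illustrative RMW on SMCR_EL1: only the vector-length field is forced
 * to a known value; every other bit is written back unchanged, which on
 * cold boot means writing back pieces of the architecturally UNKNOWN
 * reset value.
 */
static void smcr_set_vl_rmw(unsigned int vq_minus_one)
{
	u64 smcr = read_sysreg_s(SYS_SMCR_EL1);

	smcr &= ~SMCR_ELx_LEN_MASK;		/* only LEN becomes known */
	smcr |= vq_minus_one & SMCR_ELx_LEN_MASK;
	write_sysreg_s(smcr, SYS_SMCR_EL1);	/* UNKNOWN bits survive */
}
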
Mark Brown Feb. 9, 2024, 5:25 p.m. UTC | #2
On Fri, Feb 09, 2024 at 04:46:48PM +0000, Will Deacon wrote:

> Looking at the other places where we touch SMCR_EL1, it looks like we
> always use a read-modify-write sequence. However, doesn't that mean we
> inherit a bunch of unknown bits on cold boot? I'm basically wondering
> whether we should be initialising these registers to a well-known value
> earlier in the CPU init path, a bit like we do for the EL2 variants.

That'd be safer from a future-proofing point of view, yes - we've got a
similar pattern with ZCR as well from the looks of it.  I'll send a
separate series.
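
A hedged sketch of the kind of well-known-value initialisation being
discussed (cpu_init_sme_traps() and its hook point are hypothetical; the
eventual series may well do this differently, e.g. from the cpufeature
cpu_enable callbacks):

/*
 * Hypothetical early-init reset: establish a fully-known SMCR_EL1 and
 * SMPRI_EL1 baseline once per CPU rather than read-modify-writing the
 * architecturally UNKNOWN reset state.
 */
static void cpu_init_sme_traps(void)
{
	u64 smcr = 0;

	if (!system_supports_sme())
		return;

	if (system_supports_fa64())
		smcr |= SMCR_ELx_FA64;

	write_sysreg_s(smcr, SYS_SMCR_EL1);	/* known trap configuration */
	write_sysreg_s(0, SYS_SMPRI_EL1);	/* default streaming priority */
}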

Patch

diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 50e5f25d3024..7780d343ef08 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -386,6 +386,7 @@ extern void sme_alloc(struct task_struct *task, bool flush);
 extern unsigned int sme_get_vl(void);
 extern int sme_set_current_vl(unsigned long arg);
 extern int sme_get_current_vl(void);
+extern void sme_suspend_exit(void);
 
 /*
  * Return how many bytes of memory are required to store the full SME
@@ -421,6 +422,7 @@ static inline int sme_max_vl(void) { return 0; }
 static inline int sme_max_virtualisable_vl(void) { return 0; }
 static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; }
 static inline int sme_get_current_vl(void) { return -EINVAL; }
+static inline void sme_suspend_exit(void) { }
 
 static inline size_t sme_state_size(struct task_struct const *task)
 {
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index a5dc6f764195..8d2a5824d5d3 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1311,6 +1311,20 @@ void __init sme_setup(void)
 		get_sme_default_vl());
 }
 
+void sme_suspend_exit(void)
+{
+	u64 smcr = 0;
+
+	if (!system_supports_sme())
+		return;
+
+	if (system_supports_fa64())
+		smcr |= SMCR_ELx_FA64;
+
+	write_sysreg_s(smcr, SYS_SMCR_EL1);
+	write_sysreg_s(0, SYS_SMPRI_EL1);
+}
+
 #endif /* CONFIG_ARM64_SME */
 
 static void sve_init_regs(void)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index eca4d0435211..eaaff94329cd 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -12,6 +12,7 @@
 #include <asm/daifflags.h>
 #include <asm/debug-monitors.h>
 #include <asm/exec.h>
+#include <asm/fpsimd.h>
 #include <asm/mte.h>
 #include <asm/memory.h>
 #include <asm/mmu_context.h>
@@ -80,6 +81,8 @@ void notrace __cpu_suspend_exit(void)
 	 */
 	spectre_v4_enable_mitigation(NULL);
 
+	sme_suspend_exit();
+
 	/* Restore additional feature-specific configuration */
 	ptrauth_suspend_exit();
 }