--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -769,6 +769,12 @@ static __always_inline bool system_supports_sme(void)
 		cpus_have_const_cap(ARM64_SME);
 }
 
+static __always_inline bool system_supports_sme2(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_SME) &&
+		cpus_have_const_cap(ARM64_SME2);
+}
+
 static __always_inline bool system_supports_fa64(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_SME) &&
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -118,6 +118,7 @@ extern void za_load_state(void const *state);
 struct arm64_cpu_capabilities;
 extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
 extern void sme_kernel_enable(const struct arm64_cpu_capabilities *__unused);
+extern void sme2_kernel_enable(const struct arm64_cpu_capabilities *__unused);
 extern void fa64_kernel_enable(const struct arm64_cpu_capabilities *__unused);
 
 extern u64 read_zcr_features(void);
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -282,6 +282,8 @@ static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
 static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = {
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
 		       FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+		       FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
 		       FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
@@ -2649,6 +2651,18 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = has_cpuid_feature,
 		.cpu_enable = fa64_kernel_enable,
 	},
+	{
+		.desc = "SME2",
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.capability = ARM64_SME2,
+		.sys_reg = SYS_ID_AA64PFR1_EL1,
+		.sign = FTR_UNSIGNED,
+		.field_pos = ID_AA64PFR1_EL1_SME_SHIFT,
+		.field_width = ID_AA64PFR1_EL1_SME_WIDTH,
+		.min_field_value = ID_AA64PFR1_EL1_SME_SME2,
+		.matches = has_cpuid_feature,
+		.cpu_enable = sme2_kernel_enable,
+	},
 #endif /* CONFIG_ARM64_SME */
 	{
 		.desc = "WFx with timeout",
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1298,6 +1298,17 @@ void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
 	isb();
 }
 
+/*
+ * This must be called after sme_kernel_enable(), we rely on the
+ * feature table being sorted to ensure this.
+ */
+void sme2_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+{
+	/* Allow use of ZT0 */
+	write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_EZT0_MASK,
+		       SYS_SMCR_EL1);
+}
+
 /*
  * This must be called after sme_kernel_enable(), we rely on the
  * feature table being sorted to ensure this.
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -50,6 +50,7 @@ MTE
 MTE_ASYMM
 SME
 SME_FA64
+SME2
 SPECTRE_V2
 SPECTRE_V3A
 SPECTRE_V4
Add basic feature detection for SME2, detecting that the feature is
present and disabling traps for ZT0.

Signed-off-by: Mark Brown <broonie@kernel.org>
---
 arch/arm64/include/asm/cpufeature.h |  6 ++++++
 arch/arm64/include/asm/fpsimd.h     |  1 +
 arch/arm64/kernel/cpufeature.c      | 14 ++++++++++++++
 arch/arm64/kernel/fpsimd.c          | 11 +++++++++++
 arch/arm64/tools/cpucaps            |  1 +
 5 files changed, 33 insertions(+)
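
As a usage note, not part of the patch itself: a minimal sketch of how a
caller might gate ZT0 access on the new capability once this change is
applied. The function hypothetical_zt0_user() is illustrative only;
system_supports_sme2() is the helper added above.

	#include <asm/cpufeature.h>

	/*
	 * Hypothetical caller: only touch ZT0 once the ARM64_SME2
	 * capability has been detected, at which point
	 * sme2_kernel_enable() will have set SMCR_EL1.EZT0 so ZT0
	 * accesses no longer trap.
	 */
	static void hypothetical_zt0_user(void)
	{
		if (!system_supports_sme2())
			return;

		/* ... ZT0 state may be accessed here ... */
	}

Because ARM64_SME2 is an ARM64_CPUCAP_SYSTEM_FEATURE, checks like this
only report true once all CPUs have been verified to implement SME2.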