@@ -40,6 +40,8 @@ cpucap_is_possible(const unsigned int cap)
 		return IS_ENABLED(CONFIG_ARM64_BTI);
 	case ARM64_HAS_TLB_RANGE:
 		return IS_ENABLED(CONFIG_ARM64_TLB_RANGE);
+	case ARM64_WORKAROUND_2658417:
+		return IS_ENABLED(CONFIG_ARM64_ERRATUM_2658417);
 	}
 
 	return true;
@@ -121,22 +121,6 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
 	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
 }
 
-static DEFINE_RAW_SPINLOCK(reg_user_mask_modification);
-static void __maybe_unused
-cpu_clear_bf16_from_user_emulation(const struct arm64_cpu_capabilities *__unused)
-{
-	struct arm64_ftr_reg *regp;
-
-	regp = get_arm64_ftr_reg(SYS_ID_AA64ISAR1_EL1);
-	if (!regp)
-		return;
-
-	raw_spin_lock(&reg_user_mask_modification);
-	if (regp->user_mask & ID_AA64ISAR1_EL1_BF16_MASK)
-		regp->user_mask &= ~ID_AA64ISAR1_EL1_BF16_MASK;
-	raw_spin_unlock(&reg_user_mask_modification);
-}
-
 #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
 	.matches = is_affected_midr_range,			\
 	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
@@ -727,7 +711,6 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		/* Cortex-A510 r0p0 - r1p1 */
 		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
 		MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
-		.cpu_enable = cpu_clear_bf16_from_user_emulation,
 	},
 #endif
 #ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
@@ -2190,6 +2190,17 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
 }
 #endif /* CONFIG_ARM64_MTE */
 
+static void user_feature_fixup(void)
+{
+	if (cpus_have_cap(ARM64_WORKAROUND_2658417)) {
+		struct arm64_ftr_reg *regp;
+
+		regp = get_arm64_ftr_reg(SYS_ID_AA64ISAR1_EL1);
+		if (regp)
+			regp->user_mask &= ~ID_AA64ISAR1_EL1_BF16_MASK;
+	}
+}
+
 static void elf_hwcap_fixup(void)
 {
 #ifdef CONFIG_ARM64_ERRATUM_1742098
@@ -3357,6 +3368,8 @@ void __init setup_system_features(void)
 
 void __init setup_user_features(void)
 {
+	user_feature_fixup();
+
 	setup_elf_hwcaps(arm64_elf_hwcaps);
 
 	if (system_supports_32bit_el0()) {
For ARM64_WORKAROUND_2658417, we use a cpu_enable() callback to hide the
ID_AA64ISAR1_EL1.BF16 ID register field. This is a little awkward, as CPUs
may attempt to apply the workaround concurrently, requiring that we protect
the bulk of the callback with a raw_spinlock, and requiring some pointless
work every time a CPU is subsequently hotplugged in.

This patch makes this a little simpler by handling the masking once at boot
time. A new user_feature_fixup() function is called at the start of
setup_user_features() to mask the feature, matching the style of
elf_hwcap_fixup(). The ARM64_WORKAROUND_2658417 cpucap is added to
cpucap_is_possible() so that the fixup code can be elided entirely when
CONFIG_ARM64_ERRATUM_2658417 is not selected.

Note that the ARM64_WORKAROUND_2658417 capability is matched with
ERRATA_MIDR_RANGE(), which implicitly gives the capability an
ARM64_CPUCAP_LOCAL_CPU_ERRATUM type, and this forbids the late onlining of
a CPU with the erratum if the erratum was not present at boot time.
Therefore this patch doesn't change the behaviour for late onlining.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/cpucaps.h |  2 ++
 arch/arm64/kernel/cpu_errata.c   | 17 -----------------
 arch/arm64/kernel/cpufeature.c   | 13 +++++++++++++
 3 files changed, 15 insertions(+), 17 deletions(-)
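
For illustration only, and not part of the patch: a minimal, self-contained
userspace C sketch of the pattern described above, where a compile-time
"is this cap possible?" helper lets the compiler drop both the runtime check
and the one-shot boot-time fixup when the relevant config option is off. All
names here (MY_CONFIG_ERRATUM, my_cap_is_possible(), my_have_cap(),
my_user_feature_fixup()) are stand-ins, not the kernel's APIs.

/*
 * Stand-in for the pattern above, written as plain userspace C rather than
 * kernel code. my_cap_is_possible() plays the role of cpucap_is_possible():
 * when the "config option" is 0, my_have_cap() is compile-time false and the
 * fixup body can be elided entirely.
 */
#include <stdbool.h>
#include <stdio.h>

#define MY_CONFIG_ERRATUM 1	/* stand-in for CONFIG_ARM64_ERRATUM_2658417 */

enum { MY_WORKAROUND_CAP = 0, MY_NCAPS };

static unsigned long system_caps;	/* stand-in for the detected-cap bitmap */

/* Compile-time: can this cap possibly be set with the current config? */
static inline bool my_cap_is_possible(unsigned int cap)
{
	switch (cap) {
	case MY_WORKAROUND_CAP:
		return MY_CONFIG_ERRATUM;
	}
	return true;
}

/* Runtime check, short-circuited by the compile-time one. */
static inline bool my_have_cap(unsigned int cap)
{
	if (!my_cap_is_possible(cap))
		return false;
	return system_caps & (1UL << cap);
}

/* Boot-time fixup in the style of user_feature_fixup(): runs once, no lock. */
static void my_user_feature_fixup(void)
{
	if (my_have_cap(MY_WORKAROUND_CAP))
		printf("masking the affected user-visible feature once at boot\n");
}

int main(void)
{
	system_caps |= 1UL << MY_WORKAROUND_CAP;	/* pretend the erratum was detected */
	my_user_feature_fixup();
	return 0;
}

Because the fixup runs exactly once at boot rather than from a per-CPU
cpu_enable() callback, there is no concurrent caller and hence no need for a
lock, mirroring the raw_spinlock removal above.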