--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -687,6 +687,12 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 		deferred_error_interrupt_enable(c);
 }
 
+void mce_smca_cpu_init(void)
+{
+	mce_threshold_vector = amd_threshold_interrupt;
+	deferred_error_int_vector = amd_deferred_error_interrupt;
+}
+
 /*
  * DRAM ECC errors are reported in the Northbridge (bank 4) with
  * Extended Error Code 8.
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -2243,6 +2243,9 @@ void cpu_mca_init(struct cpuinfo_x86 *c)
 	mce_flags.succor = cpu_feature_enabled(X86_FEATURE_SUCCOR);
 	mce_flags.smca = cpu_feature_enabled(X86_FEATURE_SMCA);
 
+	if (mce_flags.smca)
+		mce_smca_cpu_init();
+
 	rdmsrl(MSR_IA32_MCG_CAP, cap);
 
 	/* Use accurate RIP reporting if available. */
--- a/arch/x86/kernel/cpu/mce/internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -294,12 +294,14 @@ static __always_inline void smca_extract_err_addr(struct mce *m)
 	m->addr &= GENMASK_ULL(55, lsb);
 }
 
+void mce_smca_cpu_init(void);
 #else
 static inline void mce_threshold_create_device(unsigned int cpu) { }
 static inline void mce_threshold_remove_device(unsigned int cpu) { }
 static inline bool amd_filter_mce(struct mce *m) { return false; }
 static inline bool amd_mce_usable_address(struct mce *m) { return false; }
 static inline void smca_extract_err_addr(struct mce *m) { }
+static inline void mce_smca_cpu_init(void) {}
 #endif
 
 #ifdef CONFIG_X86_ANCIENT_MCE
Currently on AMD systems, MCA interrupt handler functions are set during
CPU init. However, the functions only need to be set once for the whole
system.

Assign the handlers only during BSP init. Do so only for SMCA systems to
maintain the old behavior for legacy systems.

Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
---
Notes:
    Link: https://lore.kernel.org/r/20250213-wip-mca-updates-v2-8-3636547fe05f@amd.com

    v2->v3:
    * No change.

    v1->v2:
    * New in v2.

 arch/x86/kernel/cpu/mce/amd.c      | 6 ++++++
 arch/x86/kernel/cpu/mce/core.c     | 3 +++
 arch/x86/kernel/cpu/mce/internal.h | 2 ++
 3 files changed, 11 insertions(+)
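
To make the intent of the change concrete, below is a minimal, standalone C
sketch of the pattern being adopted; it is not kernel code. A pair of
system-wide handler pointers is installed once from the BSP init path rather
than rewritten by every CPU. All names in the sketch (threshold_vector,
smca_bsp_init, bsp_init, per_cpu_init, and so on) are hypothetical stand-ins
for the kernel symbols they mimic (mce_threshold_vector,
deferred_error_int_vector, mce_smca_cpu_init, cpu_mca_init,
mce_amd_feature_init), and the loop merely simulates per-CPU init.

/*
 * Standalone sketch (not kernel code) of "install system-wide handler
 * pointers once from the BSP init path". All identifiers below are
 * hypothetical stand-ins for the kernel symbols they mimic.
 */
#include <stdbool.h>
#include <stdio.h>

static void default_handler(void)
{
	puts("unexpected MCA interrupt");
}

static void amd_threshold_handler(void)
{
	puts("threshold interrupt handled");
}

static void amd_deferred_error_handler(void)
{
	puts("deferred error interrupt handled");
}

/*
 * Shared by all CPUs, analogous to mce_threshold_vector and
 * deferred_error_int_vector: a single assignment is visible everywhere.
 */
static void (*threshold_vector)(void) = default_handler;
static void (*deferred_error_vector)(void) = default_handler;

static bool smca_enabled = true;	/* stands in for mce_flags.smca */

/* Analogous to mce_smca_cpu_init(): install the handlers exactly once. */
static void smca_bsp_init(void)
{
	threshold_vector = amd_threshold_handler;
	deferred_error_vector = amd_deferred_error_handler;
}

/* Analogous to cpu_mca_init(): runs only for the boot CPU (BSP). */
static void bsp_init(void)
{
	if (smca_enabled)
		smca_bsp_init();
}

/*
 * Analogous to mce_amd_feature_init(): still runs on every CPU, but no
 * longer needs to touch the shared handler pointers on SMCA systems.
 */
static void per_cpu_init(int cpu)
{
	printf("cpu %d: per-CPU MCA feature setup\n", cpu);
}

int main(void)
{
	int cpu;

	bsp_init();	/* handlers assigned once, system-wide */

	for (cpu = 0; cpu < 4; cpu++)
		per_cpu_init(cpu);

	/* Any CPU taking the interrupt sees the handlers the BSP installed. */
	threshold_vector();
	deferred_error_vector();
	return 0;
}

On non-SMCA (legacy) systems the guard is simply skipped, mirroring how the
patch keeps the existing per-CPU assignment behavior for those parts.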