@@ -1442,15 +1442,20 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 }
 
 int mce_threshold_remove_device(unsigned int cpu)
 {
+	struct threshold_bank **bp = this_cpu_read(threshold_banks);
 	unsigned int bank;
 
+	if (!bp)
+		return 0;
+
 	for (bank = 0; bank < per_cpu(mce_num_banks, cpu); ++bank) {
 		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
 			continue;
 		threshold_remove_bank(cpu, bank);
 	}
-	kfree(per_cpu(threshold_banks, cpu));
-	per_cpu(threshold_banks, cpu) = NULL;
+	/* Clear the pointer before freeing the memory */
+	this_cpu_write(threshold_banks, NULL);
+	kfree(bp);
 	return 0;
 }
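
The ordering in the rewritten teardown is the point of the change: threshold_banks is snapshotted into a local, the per-CPU slot is cleared, and only then is the memory freed, so a second call (or anything re-reading the pointer) sees NULL instead of a stale pointer, and the !bp early return makes the whole function idempotent. Below is a minimal user-space sketch of that unpublish-then-free pattern, assuming nothing beyond C11; the atomics stand in for the per-CPU accessors and every name is invented for illustration, so this is a sketch of the idea, not kernel code.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct bank { int threshold; };

/* Stand-in for the per-CPU threshold_banks slot. */
static _Atomic(struct bank *) shared_bank;

static void publish(void)
{
	struct bank *b = malloc(sizeof(*b));

	if (!b)
		return;
	b->threshold = 42;
	atomic_store(&shared_bank, b);
}

static void teardown(void)
{
	/* Snapshot and unpublish in one step. */
	struct bank *b = atomic_exchange(&shared_bank, NULL);

	if (!b)
		return;	/* never created, or already torn down: mirrors the !bp check */

	free(b);	/* safe: no later reader can observe b */
}

int main(void)
{
	publish();
	teardown();
	teardown();	/* second call is a harmless no-op */
	puts("teardown ordering ok");
	return 0;
}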
@@ -1461,6 +1466,9 @@ int mce_threshold_create_device(unsigned int cpu)
 	struct threshold_bank **bp;
 	int err = 0;
 
+	if (!mce_flags.amd_threshold)
+		return 0;
+
 	bp = per_cpu(threshold_banks, cpu);
 	if (bp)
 		return 0;
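
mce_threshold_create_device() is reached from vendor-neutral hotplug code (mce_cpu_online() in core.c), so the new mce_flags.amd_threshold guard makes it a no-op on CPUs without AMD-style thresholding banks; previously such CPUs would still fall through to the allocation of the per-CPU bank array below the quoted context. Note that the remove path above does not need the flag: its !bp check keys off whether anything was actually allocated.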
@@ -1756,6 +1756,7 @@ static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
 		mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
 		mce_flags.succor	 = !!cpu_has(c, X86_FEATURE_SUCCOR);
 		mce_flags.smca		 = !!cpu_has(c, X86_FEATURE_SMCA);
+		mce_flags.amd_threshold	 = 1;
 
 		if (mce_flags.smca) {
			msr_ops.ctl	= smca_ctl_reg;
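
In context (the enclosing branch is outside the hunk), this assignment lives in __mcheck_cpu_init_early()'s AMD/Hygon vendor check, so amd_threshold is set for those vendors only. Unlike its three neighbours it is hardwired to 1 rather than derived from a CPUID feature bit, presumably because the presence of thresholding banks is discovered per bank from the MCA MISC registers rather than advertised as a single feature flag.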
@@ -148,7 +148,7 @@ struct mce_vendor_flags {
 	 * Recovery. It indicates support for data poisoning in HW and deferred
 	 * error interrupts.
 	 */
-	      succor		: 1,
+	succor			: 1,
 
 	/*
 	 * (AMD) SMCA: This bit indicates support for Scalable MCA which expands
@@ -156,9 +156,12 @@ struct mce_vendor_flags {
 	 * banks. Also, to accommodate the new banks and registers, the MCA
 	 * register space is moved to a new MSR range.
 	 */
-	      smca		: 1,
+	smca			: 1,
 
-	      __reserved_0	: 61;
+	/* AMD-style error thresholding banks present. */
+	amd_threshold		: 1,
+
+	__reserved_0		: 60;
 };
 
 extern struct mce_vendor_flags mce_flags;
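
The bookkeeping to verify in the last hunk is that the bit widths still sum to 64, so struct mce_vendor_flags remains a single __u64: three named bits plus the new amd_threshold leave 60 reserved, down from 61. A compile-time check of that arithmetic, written as a mock with plain C types in place of __u64 (the names echo the kernel struct, but this is not kernel code):

#include <assert.h>

/* Mock of struct mce_vendor_flags using unsigned long long for __u64. */
struct mock_vendor_flags {
	unsigned long long overflow_recov	: 1,
			   succor		: 1,
			   smca			: 1,
			   amd_threshold	: 1,
			   reserved_0		: 60;	/* 61 before the patch */
};

/* 1 + 1 + 1 + 1 + 60 == 64: the flags still pack into one 64-bit word. */
static_assert(sizeof(struct mock_vendor_flags) == sizeof(unsigned long long),
	      "flag bits must pack into a single 64-bit word");

int main(void)
{
	return 0;
}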