--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -1302,7 +1302,7 @@ static void cf_check init_amd(struct cpu
amd_log_freq(c);
}
-const struct cpu_dev amd_cpu_dev = {
+const struct cpu_dev __initconst_cf_clobber amd_cpu_dev = {
.c_early_init = early_init_amd,
.c_init = init_amd,
};
--- a/xen/arch/x86/cpu/centaur.c
+++ b/xen/arch/x86/cpu/centaur.c
@@ -54,6 +54,6 @@ static void cf_check init_centaur(struct
init_c3(c);
}
-const struct cpu_dev centaur_cpu_dev = {
+const struct cpu_dev __initconst_cf_clobber centaur_cpu_dev = {
.c_init = init_centaur,
};
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -113,10 +113,10 @@ static void cf_check default_init(struct
__clear_bit(X86_FEATURE_SEP, c->x86_capability);
}
-static const struct cpu_dev default_cpu = {
+static const struct cpu_dev __initconst_cf_clobber default_cpu = {
.c_init = default_init,
};
-static const struct cpu_dev *this_cpu = &default_cpu;
+static struct cpu_dev __ro_after_init actual_cpu;
static DEFINE_PER_CPU(uint64_t, msr_misc_features);
void (* __read_mostly ctxt_switch_masking)(const struct vcpu *next);
@@ -336,12 +336,13 @@ void __init early_cpu_init(bool verbose)
c->x86_vendor = x86_cpuid_lookup_vendor(ebx, ecx, edx);
switch (c->x86_vendor) {
- case X86_VENDOR_INTEL: this_cpu = &intel_cpu_dev; break;
- case X86_VENDOR_AMD: this_cpu = &amd_cpu_dev; break;
- case X86_VENDOR_CENTAUR: this_cpu = &centaur_cpu_dev; break;
- case X86_VENDOR_SHANGHAI: this_cpu = &shanghai_cpu_dev; break;
- case X86_VENDOR_HYGON: this_cpu = &hygon_cpu_dev; break;
+ case X86_VENDOR_INTEL: actual_cpu = intel_cpu_dev; break;
+ case X86_VENDOR_AMD: actual_cpu = amd_cpu_dev; break;
+ case X86_VENDOR_CENTAUR: actual_cpu = centaur_cpu_dev; break;
+ case X86_VENDOR_SHANGHAI: actual_cpu = shanghai_cpu_dev; break;
+ case X86_VENDOR_HYGON: actual_cpu = hygon_cpu_dev; break;
default:
+ actual_cpu = default_cpu;
if (!verbose)
break;
printk(XENLOG_ERR
@@ -448,8 +449,8 @@ static void generic_identify(struct cpui
if (c->extended_cpuid_level >= 0x80000021)
c->x86_capability[FEATURESET_e21a] = cpuid_eax(0x80000021);
- if (this_cpu->c_early_init)
- this_cpu->c_early_init(c);
+ if (actual_cpu.c_early_init)
+ alternative_vcall(actual_cpu.c_early_init, c);
/* c_early_init() may have adjusted cpuid levels/features. Reread. */
c->cpuid_level = cpuid_eax(0);
@@ -546,9 +547,8 @@ void identify_cpu(struct cpuinfo_x86 *c)
* At the end of this section, c->x86_capability better
* indicate the features this CPU genuinely supports!
*/
- if (this_cpu->c_init)
- this_cpu->c_init(c);
-
+ if (actual_cpu.c_init)
+ alternative_vcall(actual_cpu.c_init, c);
/*
* The vendor-specific functions might have changed features. Now
--- a/xen/arch/x86/cpu/hygon.c
+++ b/xen/arch/x86/cpu/hygon.c
@@ -87,7 +87,7 @@ static void cf_check init_hygon(struct c
amd_log_freq(c);
}
-const struct cpu_dev hygon_cpu_dev = {
+const struct cpu_dev __initconst_cf_clobber hygon_cpu_dev = {
.c_early_init = early_init_amd,
.c_init = init_hygon,
};
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -577,7 +577,7 @@ static void cf_check init_intel(struct c
setup_clear_cpu_cap(X86_FEATURE_CLWB);
}
-const struct cpu_dev intel_cpu_dev = {
+const struct cpu_dev __initconst_cf_clobber intel_cpu_dev = {
.c_early_init = early_init_intel,
.c_init = init_intel,
};
--- a/xen/arch/x86/cpu/shanghai.c
+++ b/xen/arch/x86/cpu/shanghai.c
@@ -15,6 +15,6 @@ static void cf_check init_shanghai(struc
init_intel_cacheinfo(c);
}
-const struct cpu_dev shanghai_cpu_dev = {
+const struct cpu_dev __initconst_cf_clobber shanghai_cpu_dev = {
.c_init = init_shanghai,
};
While not performance critical, these hook invocations still want converting: this way all pre-filled struct cpu_dev instances can become __initconst_cf_clobber, thus allowing a further 8 ENDBR instances to be eliminated during the 2nd phase of alternatives patching (besides moving previously resident data to .init.*).

Since all use sites need touching anyway, take the opportunity to also address a Misra C:2012 Rule 5.5 violation: rename the this_cpu static variable.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
With LTO it might turn out necessary to tag more than just "default_cpu" as __used.
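
For readers unfamiliar with the mechanism, a minimal sketch of the pattern
this patch applies (the example_* identifiers are hypothetical;
alternative_vcall(), __initconst_cf_clobber, and __ro_after_init are the
real Xen annotations used in the hunks above):

    /*
     * Hook table marked __initconst_cf_clobber: it lives in init-only
     * rodata, and since no function pointer into it survives past boot,
     * the ENDBR at the start of each referenced function can be
     * clobbered during the 2nd phase of alternatives patching.
     */
    static const struct cpu_dev __initconst_cf_clobber example_cpu_dev = {
        .c_init = example_init,
    };

    /* By-value copy; becomes read-only once init completes. */
    static struct cpu_dev __ro_after_init active_cpu;

    void __init example_select(void)
    {
        active_cpu = example_cpu_dev; /* copy the struct; keep no pointer */
    }

    void example_identify(struct cpuinfo_x86 *c)
    {
        if (active_cpu.c_init)
            /*
             * alternative_vcall() is rewritten into a direct call
             * during alternatives patching, devirtualising the call
             * site.
             */
            alternative_vcall(active_cpu.c_init, c);
    }

The by-value copy into a __ro_after_init instance (rather than retaining a
pointer) is what lets the source structures move to .init.* and the hooks'
ENDBR go away.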