--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -407,6 +407,11 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
c->cpu_core_id);
}
+static void early_init_amd(struct cpuinfo_x86 *c)
+{
+ set_cpuidmask(c);
+}
+
static void init_amd(struct cpuinfo_x86 *c)
{
u32 l, h;
@@ -595,14 +600,13 @@ static void init_amd(struct cpuinfo_x86 *c)
if ((smp_processor_id() == 1) && !cpu_has(c, X86_FEATURE_ITSC))
disable_c1_ramping();
- set_cpuidmask(c);
-
check_syscfg_dram_mod_en();
}
static const struct cpu_dev amd_cpu_dev = {
.c_vendor = "AMD",
.c_ident = { "AuthenticAMD" },
+ .c_early_init = early_init_amd,
.c_init = init_amd,
};
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -189,6 +189,23 @@ static void early_init_intel(struct cpuinfo_x86 *c)
if (boot_cpu_data.x86 == 0xF && boot_cpu_data.x86_model == 3 &&
(boot_cpu_data.x86_mask == 3 || boot_cpu_data.x86_mask == 4))
paddr_bits = 36;
+
+ if (c == &boot_cpu_data && c->x86 == 6) {
+ if (probe_intel_cpuid_faulting())
+ __set_bit(X86_FEATURE_CPUID_FAULTING,
+ c->x86_capability);
+ } else if (boot_cpu_has(X86_FEATURE_CPUID_FAULTING)) {
+ BUG_ON(!probe_intel_cpuid_faulting());
+ __set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
+ }
+
+ if (!cpu_has_cpuid_faulting)
+ set_cpuidmask(c);
+ else if ((c == &boot_cpu_data) &&
+ (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx &
+ opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx &
+ opt_cpuid_mask_xsave_eax)))
+ printk("No CPUID feature masking support available\n");
}
/*
@@ -258,23 +275,6 @@ static void init_intel(struct cpuinfo_x86 *c)
detect_ht(c);
}
- if (c == &boot_cpu_data && c->x86 == 6) {
- if (probe_intel_cpuid_faulting())
- __set_bit(X86_FEATURE_CPUID_FAULTING,
- c->x86_capability);
- } else if (boot_cpu_has(X86_FEATURE_CPUID_FAULTING)) {
- BUG_ON(!probe_intel_cpuid_faulting());
- __set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
- }
-
- if (!cpu_has_cpuid_faulting)
- set_cpuidmask(c);
- else if ((c == &boot_cpu_data) &&
- (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx &
- opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx &
- opt_cpuid_mask_xsave_eax)))
- printk("No CPUID feature masking support available\n");
-
/* Work around errata */
Intel_errata_workarounds(c);
Before c/s 44e24f8567 "x86: don't call generic_identify() redundantly", the command-line-provided masks would take effect in Xen's view of the features.

As the masks got applied after the query for features, the redundant call to generic_identify() would clobber the pre-masking feature information with the post-masking information.

Move the set_cpuidmask() calls into c_early_init() so that their effects take place before the main query for features in generic_identify().

The cpuid_mask_* command line parameters now limit the entire system, a feature XenServer was relying on for testing purposes. Subsequent changes will cause the mask MSRs to be context switched per-domain, removing the need to use the command line parameters for heterogeneous levelling purposes.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
---
 xen/arch/x86/cpu/amd.c   |  8 ++++++--
 xen/arch/x86/cpu/intel.c | 34 +++++++++++++++++-----------------
 2 files changed, 23 insertions(+), 19 deletions(-)
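For illustration only, and not part of the patch: below is a minimal, self-contained C sketch of the call ordering this change relies on. The struct layouts, function bodies, identify_cpu() shape, and main() are simplified, hypothetical stand-ins for the real Xen code paths, kept only detailed enough to show why c_early_init() is the right place for masking.

#include <stdio.h>

/* Simplified stand-ins for the real Xen types. */
struct cpuinfo_x86 { unsigned int x86_capability[8]; };

struct cpu_dev {
    void (*c_early_init)(struct cpuinfo_x86 *);
    void (*c_init)(struct cpuinfo_x86 *);
};

static void set_cpuidmask(struct cpuinfo_x86 *c)
{
    (void)c;
    puts("masks applied");          /* stand-in: apply cpuid_mask_* values */
}

static void generic_identify(struct cpuinfo_x86 *c)
{
    (void)c;
    puts("features queried");       /* stand-in: cache CPUID feature bits */
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
    set_cpuidmask(c);               /* masking now happens early */
}

static void init_amd(struct cpuinfo_x86 *c)
{
    (void)c;                        /* set_cpuidmask() used to live here */
}

static const struct cpu_dev amd_cpu_dev = {
    .c_early_init = early_init_amd,
    .c_init       = init_amd,
};

/* The ordering that matters: c_early_init() runs before the query. */
static void identify_cpu(struct cpuinfo_x86 *c, const struct cpu_dev *dev)
{
    if (dev->c_early_init)
        dev->c_early_init(c);       /* masks take effect first ...         */
    generic_identify(c);            /* ... so Xen caches masked features   */
    if (dev->c_init)
        dev->c_init(c);             /* too late for masking to be seen     */
}

int main(void)
{
    struct cpuinfo_x86 c = { { 0 } };
    identify_cpu(&c, &amd_cpu_dev);
    return 0;
}

The point is simply that anything done in c_early_init() is visible to the generic_identify() feature query, whereas work done in c_init() happens after the feature bits have already been cached, which is how the pre-masking values previously survived into Xen's view.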