@@ -299,7 +299,7 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
WARNING: this function is only called on the BP. Don't add code here
that is supposed to run on all CPUs. */
-void __init early_cpu_init(void)
+void __init early_cpu_init(bool verbose)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
u32 eax, ebx, ecx, edx;
@@ -320,6 +320,8 @@ void __init early_cpu_init(void)
case X86_VENDOR_SHANGHAI: this_cpu = &shanghai_cpu_dev; break;
case X86_VENDOR_HYGON: this_cpu = &hygon_cpu_dev; break;
default:
+ if (!verbose)
+ break;
printk(XENLOG_ERR
"Unrecognised or unsupported CPU vendor '%.12s'\n",
c->x86_vendor_id);
@@ -336,10 +338,13 @@ void __init early_cpu_init(void)
c->x86_capability[FEATURESET_1d] = edx;
c->x86_capability[FEATURESET_1c] = ecx;
- printk(XENLOG_INFO
- "CPU Vendor: %s, Family %u (%#x), Model %u (%#x), Stepping %u (raw %08x)\n",
- x86_cpuid_vendor_to_str(c->x86_vendor), c->x86, c->x86,
- c->x86_model, c->x86_model, c->x86_mask, eax);
+ if (verbose)
+ printk(XENLOG_INFO
+ "CPU Vendor: %s, Family %u (%#x), "
+ "Model %u (%#x), Stepping %u (raw %08x)\n",
+ x86_cpuid_vendor_to_str(c->x86_vendor), c->x86,
+ c->x86, c->x86_model, c->x86_model, c->x86_mask,
+ eax);
if (c->cpuid_level >= 7) {
uint32_t max_subleaf;
@@ -348,6 +353,11 @@ void __init early_cpu_init(void)
&c->x86_capability[FEATURESET_7c0],
&c->x86_capability[FEATURESET_7d0]);
+ if (test_bit(X86_FEATURE_ARCH_CAPS, c->x86_capability))
+ rdmsr(MSR_ARCH_CAPABILITIES,
+ c->x86_capability[FEATURESET_m10Al],
+ c->x86_capability[FEATURESET_m10Ah]);
+
if (max_subleaf >= 1)
cpuid_count(7, 1, &eax, &ebx, &ecx,
&c->x86_capability[FEATURESET_7d1]);
@@ -887,5 +887,14 @@ int __init early_microcode_init(unsigned long *module_map,
if ( ucode_mod.mod_end || ucode_blob.size )
rc = early_microcode_update_cpu();
+ /*
+  * Some CPUID leaves and MSRs are only present after microcode updates
+  * on some processors. We take the chance here to re-probe what little
+  * state we have already gathered, so we do not use stale values.
+  * tsx_init() in particular needs an up-to-date MSR_ARCH_CAPS.
+  */
+ early_cpu_init(false);
+
return rc;
}
@@ -15,7 +15,7 @@ extern uint64_t boot_tsc_stamp;
extern void *stack_start;
-void early_cpu_init(void);
+void early_cpu_init(bool verbose);
void early_time_init(void);
void set_nr_cpu_ids(unsigned int max_cpus);
@@ -1213,7 +1213,7 @@ void __init noreturn __start_xen(unsigned long mbi_p)
panic("Bootloader provided no memory information\n");
/* This must come before e820 code because it sets paddr_bits. */
- early_cpu_init();
+ early_cpu_init(true);
/* Choose shadow stack early, to set infrastructure up appropriately. */
if ( !boot_cpu_has(X86_FEATURE_CET_SS) )
@@ -39,9 +39,10 @@ void tsx_init(void)
static bool __read_mostly once;
/*
- * This function is first called between microcode being loaded, and CPUID
- * being scanned generally. Read into boot_cpu_data.x86_capability[] for
- * the cpu_has_* bits we care about using here.
+ * This function is first called between microcode being loaded and
+ * CPUID being scanned generally. early_cpu_init() has already prepared
+ * the feature bits needed here, and early_microcode_init() has ensured
+ * they are not stale after the microcode update.
*/
if ( unlikely(!once) )
{
@@ -49,15 +50,6 @@ void tsx_init(void)
once = true;
- if ( boot_cpu_data.cpuid_level >= 7 )
- boot_cpu_data.x86_capability[FEATURESET_7d0]
- = cpuid_count_edx(7, 0);
-
- if ( cpu_has_arch_caps )
- rdmsr(MSR_ARCH_CAPABILITIES,
- boot_cpu_data.x86_capability[FEATURESET_m10Al],
- boot_cpu_data.x86_capability[FEATURESET_m10Ah]);
-
has_rtm_always_abort = cpu_has_rtm_always_abort;
if ( cpu_has_tsx_ctrl && cpu_has_srbds_ctrl )