
[2/3] x86: Add support for CpuidUserDis

Message ID 20230505175705.18098-3-alejandro.vallejo@cloud.com (mailing list archive)
State Superseded
Series Add CpuidUserDis support

Commit Message

Alejandro Vallejo May 5, 2023, 5:57 p.m. UTC
Includes a refactor to move vendor-specific probes to vendor-specific
files. Furthermore, because CpuidUserDis is reported in CPUID itself,
the extended leaf containing that bit must be retrieved before calling
c_early_init().
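
Roughly, the resulting order in generic_identify() is (condensed from
the common.c hunk below):

	eax = cpuid_eax(0x80000000);
	if ((eax >> 16) == 0x8000)
		c->extended_cpuid_level = eax;

	/* Leaf 0x80000021 is fetched early so CpuidUserDis is visible */
	if (c->extended_cpuid_level >= 0x80000021)
		c->x86_capability[FEATURESET_e21a] = cpuid_eax(0x80000021);

	if (this_cpu->c_early_init)
		this_cpu->c_early_init(c);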

Signed-off-by: Alejandro Vallejo <alejandro.vallejo@cloud.com>
---
 xen/arch/x86/cpu/amd.c         | 29 ++++++++++++++++++-
 xen/arch/x86/cpu/common.c      | 51 ++++++++++++++++++----------------
 xen/arch/x86/cpu/intel.c       | 11 +++++++-
 xen/arch/x86/include/asm/amd.h |  1 +
 4 files changed, 66 insertions(+), 26 deletions(-)

Comments

Jan Beulich May 8, 2023, 9:17 a.m. UTC | #1
On 05.05.2023 19:57, Alejandro Vallejo wrote:
> Includes a refactor to move vendor-specific probes to vendor-specific
> files.

I wonder whether the refactoring parts wouldn't better be split off.

> @@ -363,6 +375,21 @@ static void __init noinline amd_init_levelling(void)
>  		ctxt_switch_masking = amd_ctxt_switch_masking;
>  }
>  
> +void amd_set_cpuid_user_dis(bool enable)
> +{
> +	const uint64_t msr_addr = MSR_K8_HWCR;

Nit: No MSR index is ever a 64-bit quantity. I'd recommend using MSR_K8_HWCR
directly in the two accesses below anyway, but otherwise the variable wants
to be "const unsigned int".

> +	const uint64_t bit = K8_HWCR_CPUID_USER_DIS;
> +	uint64_t val;
> +
> +	rdmsrl(msr_addr, val);
> +
> +	if (!!(val & bit) == enable)
> +		return;
> +
> +	val ^= bit;
> +	wrmsrl(msr_addr, val);
> +}
>[...]
> --- a/xen/arch/x86/cpu/intel.c
> +++ b/xen/arch/x86/cpu/intel.c
> @@ -226,8 +226,17 @@ static void cf_check intel_ctxt_switch_masking(const struct vcpu *next)
>   */
>  static void __init noinline intel_init_levelling(void)
>  {
> -	if (probe_cpuid_faulting())
> +	/* Intel Fam0f is old enough that probing for CPUID faulting support

Nit: Style (/* on its own line).

> +	 * introduces spurious #GP(0) when the appropriate MSRs are read,
> +	 * so skip it altogether. In the case where Xen is virtualized these
> +	 * MSRs may be emulated though, so we allow it in that case.
> +	 */
> +	if ((cpu_has_hypervisor || boot_cpu_data.x86 !=0xf) &&

Nit: Style (blanks around binary operators). I'd also suggest swapping both
sides of the ||, to have the commonly true case first.
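
I.e. roughly (untested, just to illustrate the suggestion):

	if ((boot_cpu_data.x86 != 0xf || cpu_has_hypervisor) &&
	    probe_cpuid_faulting()) {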

Jan

Patch

diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index caafe44740..9269015edd 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -271,8 +271,20 @@  static void __init noinline amd_init_levelling(void)
 {
 	const struct cpuidmask *m = NULL;
 
-	if (probe_cpuid_faulting())
+	/*
+	 * If there's support for CpuidUserDis or CPUID faulting then
+	 * we can skip levelling because CPUID accesses are trapped anyway.
+	 *
+	 * CPUID faulting is an Intel feature analogous to CpuidUserDis, so
+	 * that can only be present when Xen is itself virtualized (because
+	 * it can be emulated)
+	 */
+	if ((cpu_has_hypervisor && probe_cpuid_faulting()) ||
+	    boot_cpu_has(X86_FEATURE_CPUID_USER_DIS)) {
+		expected_levelling_cap |= LCAP_faulting;
+		levelling_caps |=  LCAP_faulting;
 		return;
+	}
 
 	probe_masking_msrs();
 
@@ -363,6 +375,21 @@  static void __init noinline amd_init_levelling(void)
 		ctxt_switch_masking = amd_ctxt_switch_masking;
 }
 
+void amd_set_cpuid_user_dis(bool enable)
+{
+	const uint64_t msr_addr = MSR_K8_HWCR;
+	const uint64_t bit = K8_HWCR_CPUID_USER_DIS;
+	uint64_t val;
+
+	rdmsrl(msr_addr, val);
+
+	if (!!(val & bit) == enable)
+		return;
+
+	val ^= bit;
+	wrmsrl(msr_addr, val);
+}
+
 /*
  * Check for the presence of an AMD erratum. Arguments are defined in amd.h 
  * for each known erratum. Return 1 if erratum is found.
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index edc4db1335..9bbb385db4 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -4,6 +4,7 @@ 
 #include <xen/param.h>
 #include <xen/smp.h>
 
+#include <asm/amd.h>
 #include <asm/cpu-policy.h>
 #include <asm/current.h>
 #include <asm/debugreg.h>
@@ -131,17 +132,6 @@  bool __init probe_cpuid_faulting(void)
 	uint64_t val;
 	int rc;
 
-	/*
-	 * Don't bother looking for CPUID faulting if we aren't virtualised on
-	 * AMD or Hygon hardware - it won't be present.  Likewise for Fam0F
-	 * Intel hardware.
-	 */
-	if (((boot_cpu_data.x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) ||
-	     ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
-	      boot_cpu_data.x86 == 0xf)) &&
-	    !cpu_has_hypervisor)
-		return false;
-
 	if ((rc = rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val)) == 0)
 		raw_cpu_policy.platform_info.cpuid_faulting =
 			val & MSR_PLATFORM_INFO_CPUID_FAULTING;
@@ -155,8 +145,6 @@  bool __init probe_cpuid_faulting(void)
 		return false;
 	}
 
-	expected_levelling_cap |= LCAP_faulting;
-	levelling_caps |=  LCAP_faulting;
 	setup_force_cpu_cap(X86_FEATURE_CPUID_FAULTING);
 
 	return true;
@@ -179,8 +167,10 @@  static void set_cpuid_faulting(bool enable)
 void ctxt_switch_levelling(const struct vcpu *next)
 {
 	const struct domain *nextd = next ? next->domain : NULL;
+	bool enable_cpuid_faulting;
 
-	if (cpu_has_cpuid_faulting) {
+	if (cpu_has_cpuid_faulting ||
+	    boot_cpu_has(X86_FEATURE_CPUID_USER_DIS)) {
 		/*
 		 * No need to alter the faulting setting if we are switching
 		 * to idle; it won't affect any code running in idle context.
@@ -201,12 +191,18 @@  void ctxt_switch_levelling(const struct vcpu *next)
 		 * an interim escape hatch in the form of
 		 * `dom0=no-cpuid-faulting` to restore the older behaviour.
 		 */
-		set_cpuid_faulting(nextd && (opt_dom0_cpuid_faulting ||
-					     !is_control_domain(nextd) ||
-					     !is_pv_domain(nextd)) &&
-				   (is_pv_domain(nextd) ||
-				    next->arch.msrs->
-				    misc_features_enables.cpuid_faulting));
+		enable_cpuid_faulting = nextd && (opt_dom0_cpuid_faulting ||
+		                                  !is_control_domain(nextd) ||
+		                                  !is_pv_domain(nextd)) &&
+		                        (is_pv_domain(nextd) ||
+		                         next->arch.msrs->
+		                         misc_features_enables.cpuid_faulting);
+
+		if (cpu_has_cpuid_faulting)
+			set_cpuid_faulting(enable_cpuid_faulting);
+		else
+			amd_set_cpuid_user_dis(enable_cpuid_faulting);
+
 		return;
 	}
 
@@ -415,6 +411,17 @@  static void generic_identify(struct cpuinfo_x86 *c)
 	c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
 	c->phys_proc_id = c->apicid;
 
+	eax = cpuid_eax(0x80000000);
+	if ((eax >> 16) == 0x8000)
+		c->extended_cpuid_level = eax;
+
+	/*
+	 * These AMD-defined flags are out of place, but we need
+	 * them early for the CPUID faulting probe code
+	 */
+	if (c->extended_cpuid_level >= 0x80000021)
+		c->x86_capability[FEATURESET_e21a] = cpuid_eax(0x80000021);
+
 	if (this_cpu->c_early_init)
 		this_cpu->c_early_init(c);
 
@@ -431,10 +438,6 @@  static void generic_identify(struct cpuinfo_x86 *c)
 	     (cpuid_ecx(CPUID_PM_LEAF) & CPUID6_ECX_APERFMPERF_CAPABILITY) )
 		__set_bit(X86_FEATURE_APERFMPERF, c->x86_capability);
 
-	eax = cpuid_eax(0x80000000);
-	if ((eax >> 16) == 0x8000)
-		c->extended_cpuid_level = eax;
-
 	/* AMD-defined flags: level 0x80000001 */
 	if (c->extended_cpuid_level >= 0x80000001)
 		cpuid(0x80000001, &tmp, &tmp,
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index 71fc1a1e18..7e5c657758 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -226,8 +226,17 @@  static void cf_check intel_ctxt_switch_masking(const struct vcpu *next)
  */
 static void __init noinline intel_init_levelling(void)
 {
-	if (probe_cpuid_faulting())
+	/* Intel Fam0f is old enough that probing for CPUID faulting support
+	 * introduces spurious #GP(0) when the appropriate MSRs are read,
+	 * so skip it altogether. In the case where Xen is virtualized these
+	 * MSRs may be emulated though, so we allow it in that case.
+	 */
+	if ((cpu_has_hypervisor || boot_cpu_data.x86 !=0xf) &&
+	    probe_cpuid_faulting()) {
+		expected_levelling_cap |= LCAP_faulting;
+		levelling_caps |=  LCAP_faulting;
 		return;
+	}
 
 	probe_masking_msrs();
 
diff --git a/xen/arch/x86/include/asm/amd.h b/xen/arch/x86/include/asm/amd.h
index a975d3de26..09ee52dc1c 100644
--- a/xen/arch/x86/include/asm/amd.h
+++ b/xen/arch/x86/include/asm/amd.h
@@ -155,5 +155,6 @@  extern bool amd_legacy_ssbd;
 extern bool amd_virt_spec_ctrl;
 bool amd_setup_legacy_ssbd(void);
 void amd_set_legacy_ssbd(bool enable);
+void amd_set_cpuid_user_dis(bool enable);
 
 #endif /* __AMD_H__ */