@@ -550,6 +550,13 @@ static void __init calculate_hvm_max_policy(void)
__clear_bit(X86_FEATURE_IBRSB, hvm_featureset);
__clear_bit(X86_FEATURE_IBRS, hvm_featureset);
}
+ else if ( boot_cpu_has(X86_FEATURE_AMD_SSBD) )
+ /*
+ * If SPEC_CTRL.SSBD is available, VIRT_SPEC_CTRL.SSBD can be exposed
+ * and implemented on top of it. Expose it in the max policy only, as
+ * the preference is for guests to use SPEC_CTRL.SSBD if available.
+ */
+ __set_bit(X86_FEATURE_VIRT_SSBD, hvm_featureset);
/*
* With VT-x, some features are only supported by Xen if dedicated
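For reference, a guest discovers this interface via CPUID leaf 0x80000008 (EBX bit 25 is VIRT_SSBD, matching the 8*32+25 definition further down) and drives it through MSR 0xc001011f. A minimal guest-side sketch of probe-and-enable, assuming ring 0 and with illustrative helper names that are not part of this patch:

#include <stdint.h>

#define MSR_VIRT_SPEC_CTRL 0xc001011fU  /* see the vcpu_msrs comment below */
#define SPEC_CTRL_SSBD     (1U << 2)    /* SSBD is bit 2, as in SPEC_CTRL */

static inline void cpuid(uint32_t leaf, uint32_t *a, uint32_t *b,
                         uint32_t *c, uint32_t *d)
{
    asm volatile ( "cpuid" : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
                   : "a" (leaf), "c" (0) );
}

static inline void wrmsr(uint32_t msr, uint64_t val)
{
    asm volatile ( "wrmsr" :: "c" (msr), "a" ((uint32_t)val),
                              "d" ((uint32_t)(val >> 32)) );
}

/* Enable SSBD through VIRT_SPEC_CTRL if the CPUID policy advertises it. */
static void enable_virt_ssbd(void)
{
    uint32_t a, b, c, d;

    cpuid(0x80000008, &a, &b, &c, &d);
    if ( b & (1U << 25) )               /* VIRT_SSBD */
        wrmsr(MSR_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
}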
@@ -1334,6 +1334,7 @@ static const uint32_t msrs_to_send[] = {
MSR_INTEL_MISC_FEATURES_ENABLES,
MSR_IA32_BNDCFGS,
MSR_IA32_XSS,
+ MSR_VIRT_SPEC_CTRL,
MSR_AMD64_DR0_ADDRESS_MASK,
MSR_AMD64_DR1_ADDRESS_MASK,
MSR_AMD64_DR2_ADDRESS_MASK,
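Listing the MSR here is what carries it across save/restore and migration: the save side walks msrs_to_send, reads each entry through guest_rdmsr(), and forwards non-default values. Roughly the following shape; this is a sketch rather than the exact Xen code, and record_msr() plus struct save_ctxt are illustrative stand-ins:

/* Sketch of the save-side walk over msrs_to_send (names hedged above). */
static void send_msrs(struct vcpu *v, struct save_ctxt *ctxt)
{
    unsigned int i;

    for ( i = 0; i < ARRAY_SIZE(msrs_to_send); i++ )
    {
        uint64_t val;

        if ( guest_rdmsr(v, msrs_to_send[i], &val) != X86EMUL_OKAY || !val )
            continue; /* Unreadable under this policy, or default value. */

        record_msr(ctxt, msrs_to_send[i], val);
    }
}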
@@ -291,6 +291,7 @@ struct vcpu_msrs
{
/*
* 0x00000048 - MSR_SPEC_CTRL
+ * 0xc001011f - MSR_VIRT_SPEC_CTRL (if X86_FEATURE_AMD_SSBD)
*
* For PV guests, this holds the guest kernel value. It is accessed on
* every entry/exit path.
@@ -306,6 +307,9 @@ struct vcpu_msrs
* We must clear/restore Xen's value before/after VMRUN to avoid unduly
* influencing the guest. In order to support "behind the guest's back"
* protections, we load this value (commonly 0) before VMRUN.
+ *
+ * One such "behind the guest's back" usage is setting SPEC_CTRL.SSBD
+ * when the guest sets VIRT_SPEC_CTRL.SSBD.
*/
struct {
uint32_t raw;
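Concretely, the "behind the guest's back" part works because this shadow value is loaded into MSR_SPEC_CTRL around VMRUN: a guest that sets VIRT_SPEC_CTRL.SSBD then runs with SSBD active even though its own SPEC_CTRL view never changes. A simplified sketch of the entry-side load; the real work happens in the SVM entry path and the function name here is illustrative:

#define MSR_SPEC_CTRL  0x00000048U
#define SPEC_CTRL_SSBD (1U << 2)

/*
 * Illustrative only: make the shadow value (commonly 0, or SPEC_CTRL_SSBD
 * if the guest asked for it via VIRT_SPEC_CTRL) the active SPEC_CTRL for
 * the duration of guest execution.
 */
static void load_guest_spec_ctrl(const struct vcpu_msrs *msrs)
{
    wrmsrl(MSR_SPEC_CTRL, msrs->spec_ctrl.raw);
}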
@@ -381,6 +381,13 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
? K8_HWCR_TSC_FREQ_SEL : 0;
break;
+ case MSR_VIRT_SPEC_CTRL:
+ if ( !cp->extd.virt_ssbd )
+ goto gp_fault;
+
+ *val = msrs->spec_ctrl.raw & SPEC_CTRL_SSBD;
+ break;
+
case MSR_AMD64_DE_CFG:
if ( !(cp->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) )
goto gp_fault;
@@ -666,6 +673,17 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
wrmsr_tsc_aux(val);
break;
+ case MSR_VIRT_SPEC_CTRL:
+ if ( !cp->extd.virt_ssbd )
+ goto gp_fault;
+
+ /* Only the SSBD bit is supported; the rest are ignored. */
+ if ( val & SPEC_CTRL_SSBD )
+ msrs->spec_ctrl.raw |= SPEC_CTRL_SSBD;
+ else
+ msrs->spec_ctrl.raw &= ~SPEC_CTRL_SSBD;
+ break;
+
case MSR_AMD64_DE_CFG:
/*
* OpenBSD 6.7 will panic if writing to DE_CFG triggers a #GP:
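Taken together, the read and write handlers make VIRT_SPEC_CTRL a one-bit window onto the SSBD shadow state: writes latch only SPEC_CTRL_SSBD into msrs->spec_ctrl.raw, and reads report exactly that bit back. A self-contained model of that contract, plain C with no Xen dependencies:

#include <assert.h>
#include <stdint.h>

#define SPEC_CTRL_SSBD (1u << 2)

static uint32_t spec_ctrl_raw; /* models msrs->spec_ctrl.raw */

static void virt_spec_ctrl_write(uint64_t val)
{
    /* Only the SSBD bit is honoured; every other bit is ignored. */
    if ( val & SPEC_CTRL_SSBD )
        spec_ctrl_raw |= SPEC_CTRL_SSBD;
    else
        spec_ctrl_raw &= ~SPEC_CTRL_SSBD;
}

static uint64_t virt_spec_ctrl_read(void)
{
    return spec_ctrl_raw & SPEC_CTRL_SSBD;
}

int main(void)
{
    virt_spec_ctrl_write(SPEC_CTRL_SSBD | 0xff00); /* extra bits dropped */
    assert(virt_spec_ctrl_read() == SPEC_CTRL_SSBD);

    virt_spec_ctrl_write(0);                       /* SSBD cleared again */
    assert(virt_spec_ctrl_read() == 0);

    return 0;
}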
@@ -402,12 +402,14 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
* mitigation support for guests.
*/
#ifdef CONFIG_HVM
- printk(" Support for HVM VMs:%s%s%s%s%s\n",
+ printk(" Support for HVM VMs:%s%s%s%s%s%s\n",
(boot_cpu_has(X86_FEATURE_SC_MSR_HVM) ||
boot_cpu_has(X86_FEATURE_SC_RSB_HVM) ||
boot_cpu_has(X86_FEATURE_MD_CLEAR) ||
opt_eager_fpu) ? "" : " None",
boot_cpu_has(X86_FEATURE_SC_MSR_HVM) ? " MSR_SPEC_CTRL" : "",
+ boot_cpu_has(X86_FEATURE_SC_MSR_HVM) ? " MSR_VIRT_SPEC_CTRL" : "",
boot_cpu_has(X86_FEATURE_SC_RSB_HVM) ? " RSB" : "",
opt_eager_fpu ? " EAGER_FPU" : "",
boot_cpu_has(X86_FEATURE_MD_CLEAR) ? " MD_CLEAR" : "");
@@ -265,7 +265,7 @@ XEN_CPUFEATURE(IBRS_SAME_MODE, 8*32+19) /*S IBRS provides same-mode protection
XEN_CPUFEATURE(NO_LMSL, 8*32+20) /*S EFER.LMSLE no longer supported. */
XEN_CPUFEATURE(AMD_PPIN, 8*32+23) /* Protected Processor Inventory Number */
XEN_CPUFEATURE(AMD_SSBD, 8*32+24) /*S MSR_SPEC_CTRL.SSBD available */
-XEN_CPUFEATURE(VIRT_SSBD, 8*32+25) /* MSR_VIRT_SPEC_CTRL.SSBD */
+XEN_CPUFEATURE(VIRT_SSBD, 8*32+25) /*! MSR_VIRT_SPEC_CTRL.SSBD */
XEN_CPUFEATURE(SSB_NO, 8*32+26) /*A Hardware not vulnerable to SSB */
XEN_CPUFEATURE(PSFD, 8*32+28) /*S MSR_SPEC_CTRL.PSFD */