@@ -2372,10 +2372,12 @@ By default, Xen will use STIBP when IBRS is in use (IBRS implies STIBP), and
when hardware hints recommend using it as a blanket setting.
On hardware supporting SSBD (Speculative Store Bypass Disable), the `ssbd=`
-option can be used to force or prevent Xen using the feature itself. On AMD
-hardware, this is a global option applied at boot, and not virtualised for
-guest use. On Intel hardware, the feature is virtualised for guests,
-independently of Xen's choice of setting.
+option can be used to force or prevent Xen using the feature itself. The
+feature is virtualised for guests, independently of Xen's choice of setting.
+On AMD hardware, leaving Xen's own use of SSBD disabled (`ssbd=0`, the
+default) allows the guest's SSBD selection to take effect, subject to
+hardware support.  On the same hardware, setting `ssbd=1` forces SSBD on
+at all times, regardless of the guest's choice.
On hardware supporting PSFD (Predictive Store Forwarding Disable), the `psfd=`
option can be used to force or prevent Xen using the feature itself. By
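
Summarising the semantics documented above: on AMD hardware, SSBD ends up
active whenever Xen forces it with `ssbd=1`, and otherwise follows the
guest's selection (hardware permitting). A minimal self-contained model of
that policy; the function and harness are illustrative, not Xen code:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative model of the ssbd= semantics described above. */
    static bool ssbd_active(bool opt_ssbd, bool guest_ssbd)
    {
        /* ssbd=1 forces SSBD on; ssbd=0 (default) defers to the guest. */
        return opt_ssbd || guest_ssbd;
    }

    int main(void)
    {
        printf("ssbd=0, guest off -> %d\n", ssbd_active(false, false)); /* 0 */
        printf("ssbd=0, guest on  -> %d\n", ssbd_active(false, true));  /* 1 */
        printf("ssbd=1, guest off -> %d\n", ssbd_active(true, false));  /* 1 */
        return 0;
    }
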
@@ -742,7 +742,7 @@ void amd_init_ssbd(const struct cpuinfo_x86 *c)
}
static struct ssbd_ls_cfg {
- bool locked;
+ spinlock_t lock;
unsigned int count;
} __cacheline_aligned *ssbd_ls_cfg;
static unsigned int __ro_after_init ssbd_max_cores;
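
The `bool locked` field replaced here backed a hand-rolled test-and-set
lock (its removal is visible in a later hunk); now that this path no longer
runs with GIF==0, a real `spinlock_t` with IRQ save/restore and lock-order
checking can be used. For reference, a self-contained model of the retired
open-coded lock, with C11 atomics standing in for Xen's
`test_and_set_bool()`/`write_atomic()`:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool locked; /* models the old 'bool locked' field */

    static void oc_lock(void)
    {
        /* Spin until the previous value was false, i.e. we own the lock. */
        while (atomic_exchange_explicit(&locked, true, memory_order_acquire))
            ;
    }

    static void oc_unlock(void)
    {
        atomic_store_explicit(&locked, false, memory_order_release);
    }

    int main(void)
    {
        oc_lock();
        /* ... critical section ... */
        oc_unlock();
        return 0;
    }
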
@@ -753,7 +753,7 @@ bool __init amd_setup_legacy_ssbd(void)
unsigned int i;
if ((boot_cpu_data.x86 != 0x17 && boot_cpu_data.x86 != 0x18) ||
- boot_cpu_data.x86_num_siblings <= 1)
+ boot_cpu_data.x86_num_siblings <= 1 || opt_ssbd)
return true;
/*
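
Adding `opt_ssbd` to the early return above means the shared-core tracking
table is never allocated when SSBD is forced on the command line: a setting
that cannot change at runtime needs no per-core refcounting. A minimal
sketch of the resulting gating condition; the function name is illustrative:

    #include <stdbool.h>

    /*
     * Shared-core SSBD tracking is only needed on Fam17h/Fam18h parts with
     * SMT enabled, and only when the setting can change at runtime (i.e.
     * when it isn't pinned by ssbd=1).
     */
    static bool needs_core_tracking(unsigned int family,
                                    unsigned int siblings, bool opt_ssbd)
    {
        return (family == 0x17 || family == 0x18) && siblings > 1 &&
               !opt_ssbd;
    }

    int main(void)
    {
        /* Fam17h, 2 threads/core, ssbd=0: tracking is required. */
        return needs_core_tracking(0x17, 2, false) ? 0 : 1;
    }
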
@@ -776,46 +776,51 @@ bool __init amd_setup_legacy_ssbd(void)
if (!ssbd_ls_cfg)
return false;
- if (opt_ssbd)
- for (i = 0; i < ssbd_max_cores * AMD_FAM17H_MAX_SOCKETS; i++)
- /* Set initial state, applies to any (hotplug) CPU. */
- ssbd_ls_cfg[i].count = boot_cpu_data.x86_num_siblings;
+ for (i = 0; i < ssbd_max_cores * AMD_FAM17H_MAX_SOCKETS; i++)
+ spin_lock_init(&ssbd_ls_cfg[i].lock);
return true;
}
-/*
- * Executed from GIF==0 context: avoid using BUG/ASSERT or other functionality
- * that relies on exceptions as those are not expected to run in GIF==0
- * context.
- */
-void amd_set_legacy_ssbd(bool enable)
+static void core_set_legacy_ssbd(bool enable)
{
const struct cpuinfo_x86 *c = &current_cpu_data;
struct ssbd_ls_cfg *status;
+ unsigned long flags;
if ((c->x86 != 0x17 && c->x86 != 0x18) || c->x86_num_siblings <= 1) {
- set_legacy_ssbd(c, enable);
+ BUG_ON(!set_legacy_ssbd(c, enable));
return;
}
+ BUG_ON(c->phys_proc_id >= AMD_FAM17H_MAX_SOCKETS);
+ BUG_ON(c->cpu_core_id >= ssbd_max_cores);
status = &ssbd_ls_cfg[c->phys_proc_id * ssbd_max_cores +
c->cpu_core_id];
- /*
- * Open code a very simple spinlock: this function is used with GIF==0
- * and different IF values, so would trigger the checklock detector.
- * Instead of trying to workaround the detector, use a very simple lock
- * implementation: it's better to reduce the amount of code executed
- * with GIF==0.
- */
- while (test_and_set_bool(status->locked))
- cpu_relax();
+ spin_lock_irqsave(&status->lock, flags);
status->count += enable ? 1 : -1;
+ ASSERT(status->count <= c->x86_num_siblings);
if (enable ? status->count == 1 : !status->count)
- set_legacy_ssbd(c, enable);
- barrier();
- write_atomic(&status->locked, false);
+ BUG_ON(!set_legacy_ssbd(c, enable));
+ spin_unlock_irqrestore(&status->lock, flags);
+}
+
+void amd_set_legacy_ssbd(bool enable)
+{
+ if (opt_ssbd)
+ /*
+ * Ignore attempts to turn off SSBD; it's hardcoded on the
+ * command line.
+ */
+ return;
+
+ if (cpu_has_virt_ssbd)
+ wrmsr(MSR_VIRT_SPEC_CTRL, enable ? SPEC_CTRL_SSBD : 0, 0);
+ else if (amd_legacy_ssbd)
+ core_set_legacy_ssbd(enable);
+ else
+ ASSERT_UNREACHABLE();
}
/*
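
Since SMT siblings on Fam17h/18h share the LS_CFG MSR,
core_set_legacy_ssbd() above keeps a per-core count of siblings requesting
SSBD and writes the MSR only on the 0->1 and 1->0 transitions. A
single-threaded model of just that transition logic (the real code holds
status->lock around it; `msr_ssbd` stands in for the shared MSR bit):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int count;  /* siblings currently requesting SSBD */
    static bool msr_ssbd;       /* models the core-shared LS_CFG bit */

    static void sibling_set_ssbd(bool enable)
    {
        count += enable ? 1 : -1; /* unsigned wrap gives the decrement */
        /* Touch the shared MSR only on the 0->1 / 1->0 transitions. */
        if (enable ? count == 1 : !count)
            msr_ssbd = enable;
    }

    int main(void)
    {
        sibling_set_ssbd(true);   /* first sibling: MSR written */
        sibling_set_ssbd(true);   /* second sibling: count only */
        sibling_set_ssbd(false);  /* one user left: MSR stays set */
        assert(msr_ssbd);
        sibling_set_ssbd(false);  /* last user gone: MSR cleared */
        assert(!msr_ssbd);
        printf("final: count=%u msr=%d\n", count, msr_ssbd);
        return 0;
    }
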
@@ -59,9 +59,6 @@ __UNLIKELY_END(nsvm_hap)
clgi
- ALTERNATIVE "", STR(call vmentry_virt_spec_ctrl), \
- X86_FEATURE_VIRT_SC_MSR_HVM
-
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
/* SPEC_CTRL_EXIT_TO_SVM Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
.macro svm_vmentry_spec_ctrl
@@ -131,9 +128,6 @@ __UNLIKELY_END(nsvm_hap)
ALTERNATIVE "", svm_vmexit_spec_ctrl, X86_FEATURE_SC_MSR_HVM
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
- ALTERNATIVE "", STR(call vmexit_virt_spec_ctrl), \
- X86_FEATURE_VIRT_SC_MSR_HVM
-
/*
* STGI is executed unconditionally, and is sufficiently serialising
* to safely resolve any Spectre-v1 concerns in the above logic.
@@ -973,6 +973,16 @@ static void cf_check svm_ctxt_switch_from(struct vcpu *v)
/* Resume use of ISTs now that the host TR is reinstated. */
enable_each_ist(idt_tables[cpu]);
+
+ /*
+ * Clear any previous guest selection of SSBD. Note that
+ * SPEC_CTRL.SSBD is already handled by svm_vmexit_spec_ctrl.
+ */
+ if ( v->arch.msrs->virt_spec_ctrl.raw & SPEC_CTRL_SSBD )
+ {
+ ASSERT(v->domain->arch.cpuid->extd.virt_ssbd);
+ amd_set_legacy_ssbd(false);
+ }
}
static void cf_check svm_ctxt_switch_to(struct vcpu *v)
@@ -1000,6 +1010,13 @@ static void cf_check svm_ctxt_switch_to(struct vcpu *v)
if ( cpu_has_msr_tsc_aux )
wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
+
+ /* Load SSBD if set by the guest. */
+ if ( v->arch.msrs->virt_spec_ctrl.raw & SPEC_CTRL_SSBD )
+ {
+ ASSERT(v->domain->arch.cpuid->extd.virt_ssbd);
+ amd_set_legacy_ssbd(true);
+ }
}
static void noreturn cf_check svm_do_resume(void)
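
With the GIF==0 calls removed from entry.S (the two hunks above), the
guest's legacy SSBD choice is now applied lazily at context switch:
svm_ctxt_switch_to() loads it and svm_ctxt_switch_from() drops it, so the
hardware setting only changes when the running vCPU does. A minimal model
of that pairing; types and globals are illustrative:

    #include <assert.h>
    #include <stdbool.h>

    struct vcpu { bool virt_ssbd; };  /* guest's VIRT_SPEC_CTRL.SSBD bit */

    static bool host_ssbd;            /* models the hardware SSBD state */

    static void ctxt_switch_from(const struct vcpu *v)
    {
        if (v->virt_ssbd)
            host_ssbd = false;        /* drop the outgoing guest's choice */
    }

    static void ctxt_switch_to(const struct vcpu *v)
    {
        if (v->virt_ssbd)
            host_ssbd = true;         /* load the incoming guest's choice */
    }

    int main(void)
    {
        struct vcpu a = { .virt_ssbd = true }, b = { .virt_ssbd = false };

        ctxt_switch_to(&a);
        assert(host_ssbd);
        ctxt_switch_from(&a);
        ctxt_switch_to(&b);
        assert(!host_ssbd);
        return 0;
    }
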
@@ -3116,34 +3133,6 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
vmcb_set_vintr(vmcb, intr);
}
-/* Called with GIF=0. */
-void vmexit_virt_spec_ctrl(void)
-{
- unsigned int val = opt_ssbd ? SPEC_CTRL_SSBD : 0;
-
- if ( val == current->arch.msrs->virt_spec_ctrl.raw )
- return;
-
- if ( cpu_has_virt_ssbd )
- wrmsr(MSR_VIRT_SPEC_CTRL, val, 0);
- else
- amd_set_legacy_ssbd(val);
-}
-
-/* Called with GIF=0. */
-void vmentry_virt_spec_ctrl(void)
-{
- unsigned int val = current->arch.msrs->virt_spec_ctrl.raw;
-
- if ( val == (opt_ssbd ? SPEC_CTRL_SSBD : 0) )
- return;
-
- if ( cpu_has_virt_ssbd )
- wrmsr(MSR_VIRT_SPEC_CTRL, val, 0);
- else
- amd_set_legacy_ssbd(val);
-}
-
/*
* Local variables:
* mode: C
@@ -378,8 +378,7 @@ struct vcpu_msrs
/*
* 0xc001011f - MSR_VIRT_SPEC_CTRL (if !X86_FEATURE_AMD_SSBD)
*
- * AMD only. Guest selected value, context switched on guest VM
- * entry/exit.
+ * AMD only. Guest selected value.
*/
struct {
uint32_t raw;
@@ -24,6 +24,7 @@
#include <xen/nospec.h>
#include <xen/sched.h>
+#include <asm/amd.h>
#include <asm/debugreg.h>
#include <asm/hvm/nestedhvm.h>
#include <asm/hvm/viridian.h>
@@ -697,7 +698,15 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
msrs->spec_ctrl.raw &= ~SPEC_CTRL_SSBD;
}
else
+ {
msrs->virt_spec_ctrl.raw = val & SPEC_CTRL_SSBD;
+ if ( v == curr )
+ /*
+ * Propagate the value to hardware, as it won't be set on the
+ * guest resume path.
+ */
+ amd_set_legacy_ssbd(val & SPEC_CTRL_SSBD);
+ }
break;
case MSR_AMD64_DE_CFG:
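
The `v == curr` propagation above handles the one case the context-switch
pairing cannot: a vCPU updating VIRT_SPEC_CTRL while it is running, where
no context switch occurs before the next VM entry. A hedged sketch of the
handler's shape; the names and the stub set_legacy_ssbd() are illustrative
(SPEC_CTRL.SSBD is bit 2):

    #include <stdbool.h>

    #define SPEC_CTRL_SSBD (1u << 2)

    struct vcpu { unsigned int virt_spec_ctrl; };

    static struct vcpu *current_vcpu;  /* stand-in for Xen's 'curr' */

    static void set_legacy_ssbd(bool enable)
    {
        (void)enable;  /* a real implementation would touch hardware */
    }

    static void virt_spec_ctrl_wrmsr(struct vcpu *v, unsigned int val)
    {
        /* Only the SSBD bit is implemented; other bits are dropped. */
        v->virt_spec_ctrl = val & SPEC_CTRL_SSBD;

        /*
         * If the target vCPU is the one running, apply the value now:
         * the VM entry path no longer context switches it.
         */
        if (v == current_vcpu)
            set_legacy_ssbd(val & SPEC_CTRL_SSBD);
    }

    int main(void)
    {
        struct vcpu v = { 0 };

        current_vcpu = &v;
        virt_spec_ctrl_wrmsr(&v, SPEC_CTRL_SSBD);
        return v.virt_spec_ctrl != SPEC_CTRL_SSBD;
    }
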