The current logic for AMD SSBD context switches it on every vm{entry,exit}
if the Xen and guest selections don't match. This is expensive when not
using SPEC_CTRL, and hence should be avoided as much as possible.

When SSBD is not being set from SPEC_CTRL on AMD, don't context switch it
at vm{entry,exit} and instead only context switch SSBD when switching
vCPUs. This has the side effect of running Xen code with the guest
selection of SSBD, which renders the ssbd command line option without
effect (in a similar way to what already happens on Intel hardware).

This also fixes an issue with running C code in a GIF=0 region, which is
problematic when using UBSAN or other instrumentation techniques.

As a result of no longer running the code that sets SSBD in a GIF=0
region, the locking of amd_set_legacy_ssbd() can be done using normal
spinlocks, and some more checks can be added to ensure it works as
intended.

Finally, note that since the guest SSBD selection is no longer loaded on
vmentry, the VIRT_SPEC_CTRL MSR handling needs to propagate the value to
the hardware as part of handling the wrmsr.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
 xen/arch/x86/cpu/amd.c         | 50 ++++++++++++++++++----------------
 xen/arch/x86/hvm/svm/entry.S   |  6 +---
 xen/arch/x86/hvm/svm/svm.c     | 45 ++++++++++++------------------
 xen/arch/x86/include/asm/amd.h |  2 +-
 xen/arch/x86/msr.c             |  7 +++++
 5 files changed, 52 insertions(+), 58 deletions(-)

@@ -742,7 +742,7 @@ void amd_init_ssbd(const struct cpuinfo_x86 *c)
}
static struct ssbd_ls_cfg {
- bool locked;
+ spinlock_t lock;
unsigned int count;
} __cacheline_aligned *ssbd_ls_cfg;
static unsigned int __ro_after_init ssbd_max_cores;
@@ -776,46 +776,48 @@ bool __init amd_setup_legacy_ssbd(void)
if (!ssbd_ls_cfg)
return false;
- if (opt_ssbd)
- for (i = 0; i < ssbd_max_cores * AMD_FAM17H_MAX_SOCKETS; i++)
- /* Set initial state, applies to any (hotplug) CPU. */
- ssbd_ls_cfg[i].count = boot_cpu_data.x86_num_siblings;
+ for (i = 0; i < ssbd_max_cores * AMD_FAM17H_MAX_SOCKETS; i++) {
+ /* Set initial state, applies to any (hotplug) CPU. */
+ ssbd_ls_cfg[i].count = opt_ssbd ? boot_cpu_data.x86_num_siblings
+ : 0;
+ spin_lock_init(&ssbd_ls_cfg[i].lock);
+ }
return true;
}
-/*
- * Executed from GIF==0 context: avoid using BUG/ASSERT or other functionality
- * that relies on exceptions as those are not expected to run in GIF==0
- * context.
- */
-void amd_set_legacy_ssbd(bool enable)
+static void core_set_legacy_ssbd(bool enable)
{
const struct cpuinfo_x86 *c = &current_cpu_data;
struct ssbd_ls_cfg *status;
+ unsigned long flags;
if ((c->x86 != 0x17 && c->x86 != 0x18) || c->x86_num_siblings <= 1) {
- set_legacy_ssbd(c, enable);
+ BUG_ON(!set_legacy_ssbd(c, enable));
return;
}
+ BUG_ON(c->phys_proc_id >= AMD_FAM17H_MAX_SOCKETS);
+ BUG_ON(c->cpu_core_id >= ssbd_max_cores);
status = &ssbd_ls_cfg[c->phys_proc_id * ssbd_max_cores +
c->cpu_core_id];
- /*
- * Open code a very simple spinlock: this function is used with GIF==0
- * and different IF values, so would trigger the checklock detector.
- * Instead of trying to workaround the detector, use a very simple lock
- * implementation: it's better to reduce the amount of code executed
- * with GIF==0.
- */
- while (test_and_set_bool(status->locked))
- cpu_relax();
+ spin_lock_irqsave(&status->lock, flags);
status->count += enable ? 1 : -1;
+ ASSERT(status->count <= c->x86_num_siblings);
if (enable ? status->count == 1 : !status->count)
- set_legacy_ssbd(c, enable);
- barrier();
- write_atomic(&status->locked, false);
+ BUG_ON(!set_legacy_ssbd(c, enable));
+ spin_unlock_irqrestore(&status->lock, flags);
+}
+
+void amd_set_ssbd(bool enable)
+{
+ if ( cpu_has_virt_ssbd )
+ wrmsr(MSR_VIRT_SPEC_CTRL, enable ? SPEC_CTRL_SSBD : 0, 0);
+ else if ( amd_legacy_ssbd )
+ core_set_legacy_ssbd(enable);
+ else
+ ASSERT_UNREACHABLE();
}
/*
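
As context for review, the per-core refcounting done by
core_set_legacy_ssbd() above can be condensed into a standalone sketch.
This is a minimal illustration using hypothetical stand-ins
(pthread_mutex_t for spinlock_t, set_hw_ssbd() for the LS_CFG chicken-bit
write); it is not the hypervisor code itself:

/* Minimal sketch of the per-core SSBD refcount; stand-ins, not Xen code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static struct core_cfg {
    pthread_mutex_t lock;   /* stands in for the patch's spinlock_t */
    unsigned int count;     /* sibling threads currently wanting SSBD */
} cfg = { PTHREAD_MUTEX_INITIALIZER, 0 };

/* Stand-in for the LS_CFG MSR write shared by all siblings of a core. */
static void set_hw_ssbd(bool enable)
{
    printf("core: SSBD %s\n", enable ? "on" : "off");
}

/* Only the first enable and the last disable touch the shared MSR. */
static void core_set_ssbd(bool enable)
{
    pthread_mutex_lock(&cfg.lock);
    cfg.count += enable ? 1 : -1;
    if (enable ? cfg.count == 1 : !cfg.count)
        set_hw_ssbd(enable);
    pthread_mutex_unlock(&cfg.lock);
}

int main(void)
{
    core_set_ssbd(true);    /* count 0 -> 1: MSR written */
    core_set_ssbd(true);    /* count 1 -> 2: no MSR write */
    core_set_ssbd(false);   /* count 2 -> 1: no MSR write */
    core_set_ssbd(false);   /* count 1 -> 0: MSR written */
    return 0;
}
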
@@ -59,9 +59,6 @@ __UNLIKELY_END(nsvm_hap)
clgi
- ALTERNATIVE "", STR(call vmentry_virt_spec_ctrl), \
- X86_FEATURE_VIRT_SC_MSR_HVM
-
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
/* SPEC_CTRL_EXIT_TO_SVM Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
.macro svm_vmentry_spec_ctrl
@@ -131,8 +128,6 @@ __UNLIKELY_END(nsvm_hap)
ALTERNATIVE "", svm_vmexit_spec_ctrl, X86_FEATURE_SC_MSR_HVM
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
- ALTERNATIVE "", STR(call vmexit_virt_spec_ctrl), \
- X86_FEATURE_VIRT_SC_MSR_HVM
/*
* STGI is executed unconditionally, and is sufficiently serialising
@@ -140,6 +135,7 @@ __UNLIKELY_END(nsvm_hap)
*/
stgi
GLOBAL(svm_stgi_label)
+
mov %rsp,%rdi
call svm_vmexit_handler
jmp .Lsvm_do_resume
@@ -973,6 +973,14 @@ static void cf_check svm_ctxt_switch_from(struct vcpu *v)
/* Resume use of ISTs now that the host TR is reinstated. */
enable_each_ist(idt_tables[cpu]);
+
+ /*
+ * Clear previous guest selection of SSBD if set. Note that SPEC_CTRL.SSBD
+ * is already cleared by svm_vmexit_spec_ctrl.
+ */
+ if ( v->domain->arch.cpuid->extd.virt_ssbd &&
+ (v->arch.msrs->virt_spec_ctrl.raw & SPEC_CTRL_SSBD) )
+ amd_set_ssbd(false);
}
static void cf_check svm_ctxt_switch_to(struct vcpu *v)
@@ -1000,6 +1008,11 @@ static void cf_check svm_ctxt_switch_to(struct vcpu *v)
if ( cpu_has_msr_tsc_aux )
wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
+
+ /* Load SSBD if set by the guest. */
+ if ( v->domain->arch.cpuid->extd.virt_ssbd &&
+ (v->arch.msrs->virt_spec_ctrl.raw & SPEC_CTRL_SSBD) )
+ amd_set_ssbd(true);
}
static void noreturn cf_check svm_do_resume(void)
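
The two context switch hooks above pair up so the per-core reference
count stays balanced as vCPUs migrate on and off a core. A standalone
sketch of that invariant, with an illustrative vcpu type rather than
Xen's:

/* Sketch of the switch_to/switch_from pairing; illustrative types only. */
#include <assert.h>
#include <stdbool.h>

struct vcpu { bool guest_ssbd; };   /* guest's VIRT_SPEC_CTRL.SSBD bit */

static unsigned int ssbd_count;     /* stands in for the per-core count */

static void ctxt_switch_to(const struct vcpu *v)
{
    if (v->guest_ssbd)
        ssbd_count++;               /* take a reference: SSBD stays on */
}

static void ctxt_switch_from(const struct vcpu *v)
{
    if (v->guest_ssbd)
        ssbd_count--;               /* drop it: off once count reaches 0 */
}

int main(void)
{
    struct vcpu a = { .guest_ssbd = true }, b = { .guest_ssbd = false };

    ctxt_switch_to(&a);             /* a runs with SSBD enabled */
    ctxt_switch_from(&a);
    ctxt_switch_to(&b);             /* b runs without SSBD */
    ctxt_switch_from(&b);
    assert(ssbd_count == 0);        /* every switch_to had a switch_from */
    return 0;
}
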
@@ -2518,6 +2531,10 @@ static void cf_check svm_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
vmcb->spec_ctrl = val;
break;
+ case MSR_VIRT_SPEC_CTRL:
+ amd_set_ssbd(v->arch.msrs->virt_spec_ctrl.raw);
+ break;
+
default:
printk(XENLOG_G_ERR "%s(%pv, 0x%08x, 0x%016"PRIx64") Bad register\n",
__func__, v, reg, val);
@@ -3116,34 +3133,6 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
vmcb_set_vintr(vmcb, intr);
}
-/* Called with GIF=0. */
-void vmexit_virt_spec_ctrl(void)
-{
- unsigned int val = opt_ssbd ? SPEC_CTRL_SSBD : 0;
-
- if ( val == current->arch.msrs->virt_spec_ctrl.raw )
- return;
-
- if ( cpu_has_virt_ssbd )
- wrmsr(MSR_VIRT_SPEC_CTRL, val, 0);
- else
- amd_set_legacy_ssbd(val);
-}
-
-/* Called with GIF=0. */
-void vmentry_virt_spec_ctrl(void)
-{
- unsigned int val = current->arch.msrs->virt_spec_ctrl.raw;
-
- if ( val == (opt_ssbd ? SPEC_CTRL_SSBD : 0) )
- return;
-
- if ( cpu_has_virt_ssbd )
- wrmsr(MSR_VIRT_SPEC_CTRL, val, 0);
- else
- amd_set_legacy_ssbd(val);
-}
-
/*
* Local variables:
* mode: C
@@ -153,6 +153,6 @@ void amd_check_disable_c1e(unsigned int port, u8 value);
extern bool amd_legacy_ssbd;
bool amd_setup_legacy_ssbd(void);
-void amd_set_legacy_ssbd(bool enable);
+void amd_set_ssbd(bool enable);
#endif /* __AMD_H__ */
@@ -697,7 +697,14 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
msrs->spec_ctrl.raw &= ~SPEC_CTRL_SSBD;
}
else
+ {
msrs->virt_spec_ctrl.raw = val & SPEC_CTRL_SSBD;
+ /*
+ * Propagate the value to hardware, as it won't be context switched
+ * on vmentry.
+ */
+ goto set_reg;
+ }
break;
case MSR_AMD64_DE_CFG:
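
Because nothing reloads VIRT_SPEC_CTRL on vmentry anymore, the
guest_wrmsr() change above must push the value to hardware immediately.
A simplified sketch of that flow, with set_reg() standing in for the
svm_set_reg() hook:

/* Sketch of the wrmsr propagation path; simplified stand-in types. */
#include <stdint.h>
#include <stdio.h>

#define SPEC_CTRL_SSBD (1u << 2)    /* SSBD is bit 2 of SPEC_CTRL */

static uint64_t virt_spec_ctrl;     /* msrs->virt_spec_ctrl.raw */

/* Stand-in for svm_set_reg()'s new MSR_VIRT_SPEC_CTRL case. */
static void set_reg(uint64_t val)
{
    printf("hardware SSBD -> %s\n", val & SPEC_CTRL_SSBD ? "on" : "off");
}

static void guest_wrmsr_virt_spec_ctrl(uint64_t val)
{
    virt_spec_ctrl = val & SPEC_CTRL_SSBD;
    /* Propagate now: vmentry will no longer context switch the value. */
    set_reg(virt_spec_ctrl);
}

int main(void)
{
    guest_wrmsr_virt_spec_ctrl(SPEC_CTRL_SSBD);    /* guest enables SSBD */
    guest_wrmsr_virt_spec_ctrl(0);                 /* guest disables it */
    return 0;
}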