--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -614,6 +614,8 @@ static __always_inline void mds_idle_clear_cpu_buffers(void)
mds_clear_cpu_buffers();
}
+extern void fill_return_buffer(void);
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6635,6 +6635,7 @@ int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
return ret;
}
+/* Must be reentrant, for use by vmx_post_asi_enter. */
static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu)
{
/*
--- a/arch/x86/lib/l1tf.c
+++ b/arch/x86/lib/l1tf.c
@@ -46,6 +46,8 @@ EXPORT_SYMBOL(l1tf_flush_setup);
* - may or may not work on other CPUs.
*
* Don't call unless l1tf_flush_setup() has returned successfully.
+ *
+ * Must be reentrant, for use by ASI.
*/
noinstr void l1tf_flush(void)
{
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -396,3 +396,13 @@ SYM_CODE_END(__x86_return_thunk)
EXPORT_SYMBOL(__x86_return_thunk)
#endif /* CONFIG_MITIGATION_RETHUNK */
+
+.pushsection .noinstr.text, "ax"
+SYM_CODE_START(fill_return_buffer)
+ UNWIND_HINT_FUNC
+ ENDBR
+ __FILL_RETURN_BUFFER(%_ASM_AX,RSB_CLEAR_LOOPS)
+ RET
+SYM_CODE_END(fill_return_buffer)
+__EXPORT_THUNK(fill_return_buffer)
+.popsection
--- a/arch/x86/mm/asi.c
+++ b/arch/x86/mm/asi.c
@@ -10,6 +10,7 @@
#include <asm/cmdline.h>
#include <asm/cpufeature.h>
+#include <asm/l1tf.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
@@ -38,6 +39,8 @@ struct asi __asi_global_nonsensitive = {
.mm = &init_mm,
};
+static bool do_l1tf_flush __ro_after_init;
+
static inline bool asi_class_id_valid(enum asi_class_id class_id)
{
return class_id >= 0 && class_id < ASI_MAX_NUM_CLASSES;
@@ -361,6 +364,15 @@ static int __init asi_global_init(void)
asi_clone_pgd(asi_global_nonsensitive_pgd, init_mm.pgd,
VMEMMAP_START + (1UL << PGDIR_SHIFT));
+ if (boot_cpu_has_bug(X86_BUG_L1TF)) {
+ int err = l1tf_flush_setup();
+
+ if (err)
+ pr_warn("Failed to setup L1TF flushing for ASI (%pe)\n", ERR_PTR(err));
+ else
+ do_l1tf_flush = true;
+ }
+
#ifdef CONFIG_PM_SLEEP
register_syscore_ops(&asi_syscore_ops);
#endif
@@ -512,10 +524,12 @@ static __always_inline void maybe_flush_control(struct asi *next_asi)
if (!taints)
return;
- /*
- * This is where we'll do the actual dirty work of clearing uarch state.
- * For now we just pretend, clear the taints.
- */
+ /* Clear normal indirect branch predictions, if we haven't */
+ if (cpu_feature_enabled(X86_FEATURE_IBPB))
+ __wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0);
+
+ fill_return_buffer();
+
this_cpu_and(asi_taints, ~ASI_TAINTS_CONTROL_MASK);
}
@@ -536,10 +550,9 @@ static __always_inline void maybe_flush_data(struct asi *next_asi)
if (!taints)
return;
- /*
- * This is where we'll do the actual dirty work of clearing uarch state.
- * For now we just pretend, clear the taints.
- */
+ if (do_l1tf_flush)
+ l1tf_flush();
+
this_cpu_and(asi_taints, ~ASI_TAINTS_DATA_MASK);
}
Here ASI actually starts becoming a real exploit mitigation.

On CPUs with L1TF, flush L1D when the ASI data taints say so.

On all CPUs, do some general branch predictor clearing whenever the
control taints say so.

This policy is very much just a starting point for discussion.
Primarily it's a vague gesture at the fact that there is leeway in how
ASI is used: it can be used to target CPU-specific issues (as is the
case for L1TF here), or it can be used as a fairly broad mitigation
(asi_maybe_flush_control() mitigates several known Spectre-style
attacks and very likely also some unknown ones).

Signed-off-by: Brendan Jackman <jackmanb@google.com>
---
 arch/x86/include/asm/nospec-branch.h |  2 ++
 arch/x86/kvm/vmx/vmx.c               |  1 +
 arch/x86/lib/l1tf.c                  |  2 ++
 arch/x86/lib/retpoline.S             | 10 ++++++++++
 arch/x86/mm/asi.c                    | 29 +++++++++++++++++++++--------
 5 files changed, 36 insertions(+), 8 deletions(-)
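
As a rough illustration of the policy above, here is a minimal
stand-alone C sketch of the taint-driven flushing. It is only a model
under simplifying assumptions: the taint names are invented for the
example, the next-ASI-domain logic of maybe_flush_control() and
maybe_flush_data() is omitted, and the *_stub() helpers merely stand in
for the real primitives (the MSR_IA32_PRED_CMD IBPB write plus
fill_return_buffer(), and l1tf_flush()). It is not the kernel code in
the patch.

/* Sketch only: models the taint-gated flush policy, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified taint bits (the real code has more). */
#define TAINT_CONTROL_UNTRUSTED (1u << 0)  /* branch predictor state poisoned */
#define TAINT_DATA_UNTRUSTED    (1u << 1)  /* data caches (e.g. L1D) poisoned */
#define TAINTS_CONTROL_MASK     TAINT_CONTROL_UNTRUSTED
#define TAINTS_DATA_MASK        TAINT_DATA_UNTRUSTED

static unsigned int cpu_taints;        /* per-CPU in the real code */
static bool do_l1tf_flush = true;      /* set at init only if the CPU has L1TF */

/* Stand-ins for IBPB + RSB stuffing and for the L1D flush. */
static void ibpb_and_rsb_fill_stub(void) { puts("IBPB + RSB fill"); }
static void l1tf_flush_stub(void)         { puts("L1D flush"); }

/* Clear branch-predictor state, but only if a control taint is pending. */
static void maybe_flush_control(void)
{
	if (!(cpu_taints & TAINTS_CONTROL_MASK))
		return;
	ibpb_and_rsb_fill_stub();
	cpu_taints &= ~TAINTS_CONTROL_MASK;
}

/* Flush L1D, but only if a data taint is pending and the CPU needs it. */
static void maybe_flush_data(void)
{
	if (!(cpu_taints & TAINTS_DATA_MASK))
		return;
	if (do_l1tf_flush)
		l1tf_flush_stub();
	cpu_taints &= ~TAINTS_DATA_MASK;
}

int main(void)
{
	/* Running an untrusted context taints the CPU... */
	cpu_taints |= TAINTS_CONTROL_MASK | TAINTS_DATA_MASK;
	/* ...and the taints are only dropped once the flushes have run. */
	maybe_flush_control();
	maybe_flush_data();
	return 0;
}

The point of the structure is that the taint bits are cleared only
after the corresponding flush has actually happened, so a pending taint
can never be dropped without paying for the matching clearing work.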