--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -464,6 +464,7 @@
 #define X86_FEATURE_SBPB		(20*32+27) /* Selective Branch Prediction Barrier */
 #define X86_FEATURE_IBPB_BRTYPE		(20*32+28) /* MSR_PRED_CMD[IBPB] flushes all branch type predictions */
 #define X86_FEATURE_SRSO_NO		(20*32+29) /* CPU is not affected by SRSO */
+#define X86_FEATURE_SRSO_USER_KERNEL_NO	(20*32+30) /* CPU is not affected by SRSO across user/kernel boundaries */
 
 /*
  * Extended auxiliary flags: Linux defined - for features scattered in various
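
The value (20*32+30) encodes a (word, bit) position in the kernel's capability bitmap: capabilities are stored as an array of 32-bit words, and word 20 mirrors CPUID leaf 0x80000021 EAX on AMD, so the new flag is simply bit 30 of that leaf, right after SRSO_NO at bit 29. A standalone sketch of the decomposition (illustration only, not kernel code):

#include <stdio.h>

/* Same encoding convention as the kernel's cpufeatures.h: (word * 32 + bit). */
#define X86_FEATURE_SRSO_USER_KERNEL_NO (20*32 + 30)

int main(void)
{
	unsigned int f = X86_FEATURE_SRSO_USER_KERNEL_NO;

	/* Prints "word 20, bit 30": word 20 of the capability bitmap,
	 * which holds CPUID 0x80000021 EAX on AMD parts. */
	printf("word %u, bit %u\n", f / 32, f % 32);
	return 0;
}
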
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -2615,6 +2615,11 @@ static void __init srso_select_mitigation(void)
 		break;
 
 	case SRSO_CMD_SAFE_RET:
+		if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO)) {
+			pr_notice("CPU user/kernel transitions protected, falling back to IBPB-on-VMEXIT\n");
+			goto ibpb_on_vmexit;
+		}
+
 		if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
 			/*
 			 * Enable the return thunk for generated code
@@ -2658,6 +2663,7 @@ static void __init srso_select_mitigation(void)
 		}
 		break;
 
+ibpb_on_vmexit:
 	case SRSO_CMD_IBPB_ON_VMEXIT:
 		if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
 			if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
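
The ibpb_on_vmexit: label sits inside the same switch, immediately before the case SRSO_CMD_IBPB_ON_VMEXIT: label, so the goto added in the SAFE_RET case jumps straight into the IBPB-on-VMEXIT handling and skips the heavier safe-RET thunk setup. A minimal standalone sketch of this label-before-case pattern (the enum and names here are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

enum srso_cmd { CMD_SAFE_RET, CMD_IBPB_ON_VMEXIT };

/* Illustrative stand-in for boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO). */
static bool srso_user_kernel_no = true;

static void select_mitigation(enum srso_cmd cmd)
{
	switch (cmd) {
	case CMD_SAFE_RET:
		if (srso_user_kernel_no) {
			/* Jump into the next case's handling, as the patch does. */
			goto ibpb_on_vmexit;
		}
		printf("using safe RET\n");
		break;

ibpb_on_vmexit:
	case CMD_IBPB_ON_VMEXIT:
		printf("using IBPB on VMEXIT\n");
		break;
	}
}

int main(void)
{
	select_mitigation(CMD_SAFE_RET);       /* falls back to IBPB on VMEXIT */
	select_mitigation(CMD_IBPB_ON_VMEXIT); /* reaches the same code directly */
	return 0;
}

Jumping to a label placed before a case label is plain C; it lets the two entry points share a single mitigation path.
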
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1270,6 +1270,7 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
 	VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
 	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
 	VULNBL_AMD(0x19, SRSO),
+	VULNBL_AMD(0x1a, SRSO),
 	{}
 };
 
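
This last hunk marks AMD family 0x1a (Zen 5) as affected by SRSO in the CPU vulnerability blacklist; at boot the kernel matches the CPU against this table to decide whether to set the SRSO bug bit in the first place. A simplified, standalone sketch of that table-driven match (the struct, flag value, and vendor-less lookup are illustrative simplifications, not the kernel's actual cpu_matches() API):

#include <stdbool.h>
#include <stdio.h>

#define SRSO 0x1 /* illustrative flag bit */

struct vuln_entry {
	unsigned char family;
	unsigned int flags;
};

/* Simplified analogue of cpu_vuln_blacklist[]: families known to be
 * affected by SRSO (vendor distinction omitted for brevity). */
static const struct vuln_entry blacklist[] = {
	{ 0x17, SRSO },
	{ 0x18, SRSO },
	{ 0x19, SRSO },
	{ 0x1a, SRSO }, /* the newly added Zen 5 entry */
	{ 0, 0 }        /* terminator, like the {} in the kernel table */
};

/* Simplified analogue of cpu_matches(): does any entry for this
 * family carry the requested vulnerability flag? */
static bool cpu_matches(unsigned char family, unsigned int which)
{
	for (const struct vuln_entry *e = blacklist; e->family; e++)
		if (e->family == family && (e->flags & which))
			return true;
	return false;
}

int main(void)
{
	printf("family 0x1a affected by SRSO: %s\n",
	       cpu_matches(0x1a, SRSO) ? "yes" : "no");
	return 0;
}
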