Message ID | 20230414062545.270178-6-chao.gao@intel.com (mailing list archive)
---|---
State | New, archived
Series | Intel IA32_SPEC_CTRL Virtualization
On 4/14/2023 2:25 PM, Chao Gao wrote:
> From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
>
> Guests that have different family/model than the host may not be aware
> of hardware mitigations (such as RRSBA_DIS_S) available on host. This is
> particularly true when guests migrate. To solve this problem Intel
> processors have added a virtual MSR interface through which guests can
> report their mitigation status and request VMM to deploy relevant
> hardware mitigations.
>
> Use this virtualized MSR interface to request relevant hardware controls
> for retpoline mitigation.
>
> Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
> Co-developed-by: Zhang Chen <chen.zhang@intel.com>
> Signed-off-by: Zhang Chen <chen.zhang@intel.com>
> Signed-off-by: Chao Gao <chao.gao@intel.com>
> Tested-by: Jiaan Lu <jiaan.lu@intel.com>
> ---
>  arch/x86/include/asm/msr-index.h | 25 +++++++++++++++++++++++++
>  arch/x86/kernel/cpu/bugs.c       | 25 +++++++++++++++++++++++++
>  2 files changed, 50 insertions(+)
>
> diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
> index 60b25d87b82c..aec213f0c6fc 100644
> --- a/arch/x86/include/asm/msr-index.h
> +++ b/arch/x86/include/asm/msr-index.h
> @@ -166,6 +166,7 @@
>  					 * IA32_XAPIC_DISABLE_STATUS MSR
>  					 * supported
>  					 */
> +#define ARCH_CAP_VIRTUAL_ENUM	BIT_ULL(63)	/* MSR_VIRTUAL_ENUMERATION supported */
>
>  #define MSR_IA32_FLUSH_CMD		0x0000010b
>  #define L1D_FLUSH			BIT(0)	/*
> @@ -1103,6 +1104,30 @@
>  #define MSR_IA32_VMX_MISC_INTEL_PT                 (1ULL << 14)
>  #define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
>  #define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE   0x1F
> +
> +/* Intel virtual MSRs */
> +#define MSR_VIRTUAL_ENUMERATION			0x50000000
> +#define VIRT_ENUM_MITIGATION_CTRL_SUPPORT	BIT(0)	/*
> +							 * Mitigation ctrl via virtual
> +							 * MSRs supported
> +							 */
> +
> +#define MSR_VIRTUAL_MITIGATION_ENUM		0x50000001
> +#define MITI_ENUM_BHB_CLEAR_SEQ_S_SUPPORT	BIT(0)	/* VMM supports BHI_DIS_S */
> +#define MITI_ENUM_RETPOLINE_S_SUPPORT		BIT(1)	/* VMM supports RRSBA_DIS_S */
> +
> +#define MSR_VIRTUAL_MITIGATION_CTRL		0x50000002
> +#define MITI_CTRL_BHB_CLEAR_SEQ_S_USED_BIT	0	/*
> +							 * Request VMM to deploy
> +							 * BHI_DIS_S mitigation
> +							 */
> +#define MITI_CTRL_BHB_CLEAR_SEQ_S_USED		BIT(MITI_CTRL_BHB_CLEAR_SEQ_S_USED_BIT)

Seems it is defined, but not used to request VMM to deploy BHI_DIS_S
mitigation?

And IMO, it is more natural to put this patch after the four capability
advertising patches.

> +#define MITI_CTRL_RETPOLINE_S_USED_BIT	1	/*
> +							 * Request VMM to deploy
> +							 * RRSBA_DIS_S mitigation
> +							 */
> +#define MITI_CTRL_RETPOLINE_S_USED		BIT(MITI_CTRL_RETPOLINE_S_USED_BIT)
> +
>  /* AMD-V MSRs */
>
>  #define MSR_VM_CR			0xc0010114
> diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
> index f9d060e71c3e..5326c03d9d5e 100644
> --- a/arch/x86/kernel/cpu/bugs.c
> +++ b/arch/x86/kernel/cpu/bugs.c
> @@ -1435,6 +1435,27 @@ static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_
>  	dump_stack();
>  }
>
> +/* Speculation control using virtualized MSRs */
> +static void spec_ctrl_setup_virtualized_msr(void)
> +{
> +	u64 msr_virt_enum, msr_mitigation_enum;
> +
> +	/* When retpoline is being used, request relevant hardware controls */
> +	if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
> +		return;
> +
> +	if (!(x86_read_arch_cap_msr() & ARCH_CAP_VIRTUAL_ENUM))
> +		return;
> +
> +	rdmsrl(MSR_VIRTUAL_ENUMERATION, msr_virt_enum);
> +	if (!(msr_virt_enum & VIRT_ENUM_MITIGATION_CTRL_SUPPORT))
> +		return;
> +
> +	rdmsrl(MSR_VIRTUAL_MITIGATION_ENUM, msr_mitigation_enum);
> +	if (msr_mitigation_enum & MITI_ENUM_RETPOLINE_S_SUPPORT)
> +		msr_set_bit(MSR_VIRTUAL_MITIGATION_CTRL, MITI_CTRL_RETPOLINE_S_USED_BIT);
> +}
> +
>  static void __init spectre_v2_select_mitigation(void)
>  {
>  	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
> @@ -1546,6 +1567,8 @@ static void __init spectre_v2_select_mitigation(void)
>  	    mode == SPECTRE_V2_RETPOLINE)
>  		spec_ctrl_disable_kernel_rrsba();
>
> +	spec_ctrl_setup_virtualized_msr();
> +
>  	spectre_v2_enabled = mode;
>  	pr_info("%s\n", spectre_v2_strings[mode]);
>
> @@ -2115,6 +2138,8 @@ void x86_spec_ctrl_setup_ap(void)
>
>  	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
>  		x86_amd_ssb_disable();
> +
> +	spec_ctrl_setup_virtualized_msr();
>  }
>
>  bool itlb_multihit_kvm_mitigation;
On Mon, Apr 17, 2023 at 09:43:59PM +0800, Binbin Wu wrote:
>
>On 4/14/2023 2:25 PM, Chao Gao wrote:
>> From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
>>
>> Guests that have different family/model than the host may not be aware
>> of hardware mitigations (such as RRSBA_DIS_S) available on host. This is
>> particularly true when guests migrate. To solve this problem Intel
>> processors have added a virtual MSR interface through which guests can
>> report their mitigation status and request VMM to deploy relevant
>> hardware mitigations.
>>
>> Use this virtualized MSR interface to request relevant hardware controls
>> for retpoline mitigation.
>>
>> Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
>> Co-developed-by: Zhang Chen <chen.zhang@intel.com>
>> Signed-off-by: Zhang Chen <chen.zhang@intel.com>
>> Signed-off-by: Chao Gao <chao.gao@intel.com>
>> Tested-by: Jiaan Lu <jiaan.lu@intel.com>
>> ---
>>  arch/x86/include/asm/msr-index.h | 25 +++++++++++++++++++++++++
>>  arch/x86/kernel/cpu/bugs.c       | 25 +++++++++++++++++++++++++
>>  2 files changed, 50 insertions(+)
>>
>> diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
>> index 60b25d87b82c..aec213f0c6fc 100644
>> --- a/arch/x86/include/asm/msr-index.h
>> +++ b/arch/x86/include/asm/msr-index.h
>> @@ -166,6 +166,7 @@
>>  					 * IA32_XAPIC_DISABLE_STATUS MSR
>>  					 * supported
>>  					 */
>> +#define ARCH_CAP_VIRTUAL_ENUM	BIT_ULL(63)	/* MSR_VIRTUAL_ENUMERATION supported */
>>
>>  #define MSR_IA32_FLUSH_CMD		0x0000010b
>>  #define L1D_FLUSH			BIT(0)	/*
>> @@ -1103,6 +1104,30 @@
>>  #define MSR_IA32_VMX_MISC_INTEL_PT                 (1ULL << 14)
>>  #define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
>>  #define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE   0x1F
>> +
>> +/* Intel virtual MSRs */
>> +#define MSR_VIRTUAL_ENUMERATION			0x50000000
>> +#define VIRT_ENUM_MITIGATION_CTRL_SUPPORT	BIT(0)	/*
>> +							 * Mitigation ctrl via virtual
>> +							 * MSRs supported
>> +							 */
>> +
>> +#define MSR_VIRTUAL_MITIGATION_ENUM		0x50000001
>> +#define MITI_ENUM_BHB_CLEAR_SEQ_S_SUPPORT	BIT(0)	/* VMM supports BHI_DIS_S */
>> +#define MITI_ENUM_RETPOLINE_S_SUPPORT		BIT(1)	/* VMM supports RRSBA_DIS_S */
>> +
>> +#define MSR_VIRTUAL_MITIGATION_CTRL		0x50000002
>> +#define MITI_CTRL_BHB_CLEAR_SEQ_S_USED_BIT	0	/*
>> +							 * Request VMM to deploy
>> +							 * BHI_DIS_S mitigation
>> +							 */
>> +#define MITI_CTRL_BHB_CLEAR_SEQ_S_USED		BIT(MITI_CTRL_BHB_CLEAR_SEQ_S_USED_BIT)
>
>Seems it is defined, but not used to request VMM to deploy BHI_DIS_S
>mitigation?

Because Linux kernel doesn't use BHB-clearing sequence. Instead, "disable
unprivileged eBPF by default" + SMAP + eIBRS are used.

KVM uses this bit when checking if guests, which may not be running Linux,
are using BHB-clearing sequence.

>
>And IMO, it is more natural to put this patch after the four capability
>advertising patches.

Makes sense. I will organize the series in that order.
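[Editor's note] For context on the VMM side of the exchange Chao describes, which is not part of this guest patch, the sketch below shows how a hypervisor could fold the guest's MSR_VIRTUAL_MITIGATION_CTRL bits into the IA32_SPEC_CTRL value it maintains for the vCPU. The helper name vmm_spec_ctrl_overlay() and the standalone defines are illustrative assumptions, not the KVM code from this series; only the virtual-MSR bit layout follows the patch, and RRSBA_DIS_S/BHI_DIS_S are the architectural IA32_SPEC_CTRL bits 6 and 10.

/*
 * Hypothetical VMM-side sketch (not the KVM implementation from this
 * series): translate the software mitigations a guest reports via
 * MSR_VIRTUAL_MITIGATION_CTRL into hardware controls that the VMM forces
 * on in the IA32_SPEC_CTRL value used while the vCPU runs.
 */
#include <stdint.h>

#define MITI_CTRL_BHB_CLEAR_SEQ_S_USED	(1ULL << 0)
#define MITI_CTRL_RETPOLINE_S_USED	(1ULL << 1)

#define SPEC_CTRL_RRSBA_DIS_S		(1ULL << 6)
#define SPEC_CTRL_BHI_DIS_S		(1ULL << 10)

static uint64_t vmm_spec_ctrl_overlay(uint64_t guest_miti_ctrl)
{
	uint64_t overlay = 0;

	/*
	 * Guest kernel relies on retpoline: on parts where RET may still
	 * fall back to the alternate (RRSBA) predictor, enable RRSBA_DIS_S
	 * so returns in the guest kernel stay confined to the RSB.
	 */
	if (guest_miti_ctrl & MITI_CTRL_RETPOLINE_S_USED)
		overlay |= SPEC_CTRL_RRSBA_DIS_S;

	/*
	 * Guest (possibly non-Linux) relies on a BHB-clearing sequence that
	 * may not be effective on this host CPU; BHI_DIS_S provides the
	 * hardware equivalent.
	 */
	if (guest_miti_ctrl & MITI_CTRL_BHB_CLEAR_SEQ_S_USED)
		overlay |= SPEC_CTRL_BHI_DIS_S;

	return overlay;
}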
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 60b25d87b82c..aec213f0c6fc 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -166,6 +166,7 @@
 					 * IA32_XAPIC_DISABLE_STATUS MSR
 					 * supported
 					 */
+#define ARCH_CAP_VIRTUAL_ENUM	BIT_ULL(63)	/* MSR_VIRTUAL_ENUMERATION supported */
 
 #define MSR_IA32_FLUSH_CMD		0x0000010b
 #define L1D_FLUSH			BIT(0)	/*
@@ -1103,6 +1104,30 @@
 #define MSR_IA32_VMX_MISC_INTEL_PT                 (1ULL << 14)
 #define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
 #define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE   0x1F
+
+/* Intel virtual MSRs */
+#define MSR_VIRTUAL_ENUMERATION			0x50000000
+#define VIRT_ENUM_MITIGATION_CTRL_SUPPORT	BIT(0)	/*
+							 * Mitigation ctrl via virtual
+							 * MSRs supported
+							 */
+
+#define MSR_VIRTUAL_MITIGATION_ENUM		0x50000001
+#define MITI_ENUM_BHB_CLEAR_SEQ_S_SUPPORT	BIT(0)	/* VMM supports BHI_DIS_S */
+#define MITI_ENUM_RETPOLINE_S_SUPPORT		BIT(1)	/* VMM supports RRSBA_DIS_S */
+
+#define MSR_VIRTUAL_MITIGATION_CTRL		0x50000002
+#define MITI_CTRL_BHB_CLEAR_SEQ_S_USED_BIT	0	/*
+							 * Request VMM to deploy
+							 * BHI_DIS_S mitigation
+							 */
+#define MITI_CTRL_BHB_CLEAR_SEQ_S_USED		BIT(MITI_CTRL_BHB_CLEAR_SEQ_S_USED_BIT)
+#define MITI_CTRL_RETPOLINE_S_USED_BIT	1	/*
+							 * Request VMM to deploy
+							 * RRSBA_DIS_S mitigation
+							 */
+#define MITI_CTRL_RETPOLINE_S_USED		BIT(MITI_CTRL_RETPOLINE_S_USED_BIT)
+
 /* AMD-V MSRs */
 
 #define MSR_VM_CR			0xc0010114
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index f9d060e71c3e..5326c03d9d5e 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -1435,6 +1435,27 @@ static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_
 	dump_stack();
 }
 
+/* Speculation control using virtualized MSRs */
+static void spec_ctrl_setup_virtualized_msr(void)
+{
+	u64 msr_virt_enum, msr_mitigation_enum;
+
+	/* When retpoline is being used, request relevant hardware controls */
+	if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
+		return;
+
+	if (!(x86_read_arch_cap_msr() & ARCH_CAP_VIRTUAL_ENUM))
+		return;
+
+	rdmsrl(MSR_VIRTUAL_ENUMERATION, msr_virt_enum);
+	if (!(msr_virt_enum & VIRT_ENUM_MITIGATION_CTRL_SUPPORT))
+		return;
+
+	rdmsrl(MSR_VIRTUAL_MITIGATION_ENUM, msr_mitigation_enum);
+	if (msr_mitigation_enum & MITI_ENUM_RETPOLINE_S_SUPPORT)
+		msr_set_bit(MSR_VIRTUAL_MITIGATION_CTRL, MITI_CTRL_RETPOLINE_S_USED_BIT);
+}
+
 static void __init spectre_v2_select_mitigation(void)
 {
 	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -1546,6 +1567,8 @@ static void __init spectre_v2_select_mitigation(void)
 	    mode == SPECTRE_V2_RETPOLINE)
 		spec_ctrl_disable_kernel_rrsba();
 
+	spec_ctrl_setup_virtualized_msr();
+
 	spectre_v2_enabled = mode;
 	pr_info("%s\n", spectre_v2_strings[mode]);
 
@@ -2115,6 +2138,8 @@ void x86_spec_ctrl_setup_ap(void)
 
 	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
 		x86_amd_ssb_disable();
+
+	spec_ctrl_setup_virtualized_msr();
 }
 
 bool itlb_multihit_kvm_mitigation;
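[Editor's note] The diff above adds only the guest-kernel side of the interface. As a side note, the enumeration chain it relies on can be spot-checked from user space inside a guest. The following is a minimal sketch, assuming the msr driver is loaded (modprobe msr, so /dev/cpu/0/msr exists) and the program runs as root; it only reads the enumeration MSRs and is not part of the patch.

/*
 * Illustrative guest-side check (not part of the patch): read the
 * enumeration MSRs through /dev/cpu/0/msr to confirm the VMM exposes the
 * virtual mitigation-control interface.  MSR indices follow the defines
 * added by this patch.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
#define MSR_VIRTUAL_ENUMERATION		0x50000000
#define MSR_VIRTUAL_MITIGATION_ENUM	0x50000001

static int rdmsr_cpu0(uint32_t msr, uint64_t *val)
{
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0)
		return -1;
	/* The msr device returns 8 bytes at file offset == MSR index. */
	if (pread(fd, val, sizeof(*val), msr) != sizeof(*val)) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	uint64_t caps, venum, menum;

	if (rdmsr_cpu0(MSR_IA32_ARCH_CAPABILITIES, &caps) || !(caps & (1ULL << 63))) {
		puts("virtual enumeration MSR not advertised");
		return 1;
	}
	if (rdmsr_cpu0(MSR_VIRTUAL_ENUMERATION, &venum) || !(venum & 1)) {
		puts("mitigation control via virtual MSRs not supported");
		return 1;
	}
	if (rdmsr_cpu0(MSR_VIRTUAL_MITIGATION_ENUM, &menum))
		return 1;
	printf("VMM can deploy:%s%s\n",
	       (menum & 1) ? " BHI_DIS_S" : "",
	       (menum & 2) ? " RRSBA_DIS_S" : "");
	return 0;
}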