Message ID | 20230524061634.54141-4-chao.gao@intel.com (mailing list archive)
---|---
State | New, archived |
Series | MSR_IA32_ARCH_CAPABILITIES cleanups
On 5/24/2023 2:16 PM, Chao Gao wrote:
> KVM open-codes x86_read_arch_cap_msr() in a few places. Eliminate them
> by exposing the helper function and using it directly.
>
> No functional change intended.

Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>

> Signed-off-by: Chao Gao <chao.gao@intel.com>
> ---
>  arch/x86/kernel/cpu/common.c |  1 +
>  arch/x86/kvm/vmx/vmx.c       | 19 +++++--------------
>  arch/x86/kvm/x86.c           |  7 +------
>  3 files changed, 7 insertions(+), 20 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 80710a68ef7d..b34dd3f3e6c4 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1315,6 +1315,7 @@ u64 x86_read_arch_cap_msr(void)
 
         return ia32_cap;
 }
+EXPORT_SYMBOL_GPL(x86_read_arch_cap_msr);
 
 static bool arch_cap_mmio_immune(u64 ia32_cap)
 {
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 8274ef5e89e5..3dec4a4f637e 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -255,14 +255,9 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
                 return 0;
         }
 
-        if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
-                u64 msr;
-
-                rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
-                if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
-                        l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
-                        return 0;
-                }
+        if (x86_read_arch_cap_msr() & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
+                l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
+                return 0;
         }
 
         /* If set to auto use the default l1tf mitigation method */
@@ -394,13 +389,9 @@ static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
 
 static void vmx_setup_fb_clear_ctrl(void)
 {
-        u64 msr;
-
-        if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES) &&
-            !boot_cpu_has_bug(X86_BUG_MDS) &&
+        if (!boot_cpu_has_bug(X86_BUG_MDS) &&
             !boot_cpu_has_bug(X86_BUG_TAA)) {
-                rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
-                if (msr & ARCH_CAP_FB_CLEAR_CTRL)
+                if (x86_read_arch_cap_msr() & ARCH_CAP_FB_CLEAR_CTRL)
                         vmx_fb_clear_ctrl_available = true;
         }
 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f7838260c183..b1bdb84ac10f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1612,12 +1612,7 @@ static bool kvm_is_immutable_feature_msr(u32 msr)
 
 static u64 kvm_get_arch_capabilities(void)
 {
-        u64 data = 0;
-
-        if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
-                rdmsrl(MSR_IA32_ARCH_CAPABILITIES, data);
-                data &= KVM_SUPPORTED_ARCH_CAP;
-        }
+        u64 data = x86_read_arch_cap_msr() & KVM_SUPPORTED_ARCH_CAP;
 
         /*
          * If nx_huge_pages is enabled, KVM's shadow paging will ensure that
KVM open-codes x86_read_arch_cap_msr() in a few places. Eliminate them by
exposing the helper function and using it directly.

No functional change intended.

Signed-off-by: Chao Gao <chao.gao@intel.com>
---
 arch/x86/kernel/cpu/common.c |  1 +
 arch/x86/kvm/vmx/vmx.c       | 19 +++++--------------
 arch/x86/kvm/x86.c           |  7 +------
 3 files changed, 7 insertions(+), 20 deletions(-)
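For context on why this is not a functional change: x86_read_arch_cap_msr() itself performs the boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES) check and returns 0 when the MSR is not enumerated, so the open-coded guards removed in the diff above were redundant. A minimal sketch of the helper, reconstructed from the hunk context in arch/x86/kernel/cpu/common.c (the exact upstream body may differ slightly):

u64 x86_read_arch_cap_msr(void)
{
        u64 ia32_cap = 0;

        /* Read the MSR only when the CPU enumerates it; otherwise report no capabilities. */
        if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
                rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

        return ia32_cap;
}

The new EXPORT_SYMBOL_GPL() is what lets KVM call the helper when kvm.ko and kvm-intel.ko are built as modules; built-in code could already reach it through its existing declaration.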