
[v4,2/2] x86/vmx: optimize MSR_MISC_FEATURES_ENABLES switch

Message ID 20190329135422.15046-3-xiaoyao.li@linux.intel.com (mailing list archive)
State New, archived
Series Switch MSR_MISC_FEATURES_ENABLES and one optimization

Commit Message

Xiaoyao Li March 29, 2019, 1:54 p.m. UTC
KVM needs to switch MSR_MISC_FEATURES_ENABLES between the host and guest values
on every pcpu/vcpu context switch. Since WRMSR is expensive, this patch saves
cycles by avoiding the WRMSR to MSR_MISC_FEATURES_ENABLES whenever possible.

If the host's value is zero, nothing needs to be done, since the guest can use
KVM's emulated CPUID faulting.

If the host's value is non-zero, there is no need to clear
MSR_MISC_FEATURES_ENABLES unconditionally. When the guest's value equals the
host's, hardware CPUID faulting can be used as-is, avoiding the WRMSR to
MSR_MISC_FEATURES_ENABLES.
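
For reference, a condensed sketch (pseudo-C) of the resulting context-switch
paths, following the vmx_prepare_switch_to_guest()/vmx_prepare_switch_to_host()
hunks in the diff below:

	/* vcpu load: write the guest value only when it actually differs. */
	msrval = this_cpu_read(msr_misc_features_shadow);
	if (msrval && msrval != vcpu->arch.msr_misc_features_enables)
		wrmsrl(MSR_MISC_FEATURES_ENABLES,
		       vcpu->arch.msr_misc_features_enables);

	/* vcpu put: restore the host value only when it actually differs. */
	msrval = this_cpu_read(msr_misc_features_shadow);
	if (msrval && msrval != vmx->vcpu.arch.msr_misc_features_enables)
		wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval);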

Since hardware CPUID faulting takes priority over the CPUID VM exit, the
guest's value must be propagated to hardware on a guest WRMSR while hardware
CPUID faulting is in use for the guest.
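
Accordingly, the guest WRMSR handler only touches hardware while guest state is
loaded and the host shadow value is non-zero (a condensed view of the
vmx_set_msr() hunk below):

	case MSR_MISC_FEATURES_ENABLES:
		if (kvm_misc_features_enables_msr_invalid(vcpu, data))
			return 1;
		/* Hardware CPUID faulting in use: keep it in sync with the guest. */
		if (vmx->loaded_cpu_state && this_cpu_read(msr_misc_features_shadow))
			wrmsrl(MSR_MISC_FEATURES_ENABLES, data);
		vcpu->arch.msr_misc_features_enables = data;
		break;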

Note that MSR_MISC_FEATURES_ENABLES exists only on Intel CPUs, so this
optimization is applied only to VMX.

Signed-off-by: Xiaoyao Li <xiaoyao.li@linux.intel.com>
---
 arch/x86/include/asm/kvm_host.h |  2 ++
 arch/x86/kvm/vmx/vmx.c          | 15 ++++++++++++---
 arch/x86/kvm/x86.c              | 11 ++++++++---
 3 files changed, 22 insertions(+), 6 deletions(-)

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2c53df4a5a2a..105691e069d9 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1343,6 +1343,8 @@  void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
 
+bool kvm_misc_features_enables_msr_invalid(struct kvm_vcpu *vcpu, u64 data);
+
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index cb0f63879a25..07a598663ace 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1041,6 +1041,7 @@  void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 	unsigned long fs_base, gs_base;
 	u16 fs_sel, gs_sel;
 	int i;
+	u64 msrval;
 
 	vmx->req_immediate_exit = false;
 
@@ -1064,8 +1065,9 @@  void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 	vmx->loaded_cpu_state = vmx->loaded_vmcs;
 	host_state = &vmx->loaded_cpu_state->host_state;
 
-	if (this_cpu_read(msr_misc_features_shadow))
-		wrmsrl(MSR_MISC_FEATURES_ENABLES, 0ULL);
+	msrval = this_cpu_read(msr_misc_features_shadow);
+	if (msrval && msrval != vcpu->arch.msr_misc_features_enables)
+		wrmsrl(MSR_MISC_FEATURES_ENABLES, vcpu->arch.msr_misc_features_enables);
 
 	/*
 	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
@@ -1138,7 +1140,7 @@  static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
 	vmx->loaded_cpu_state = NULL;
 
 	msrval = this_cpu_read(msr_misc_features_shadow);
-	if (msrval)
+	if (msrval && msrval != vmx->vcpu.arch.msr_misc_features_enables)
 		wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval);
 
 #ifdef CONFIG_X86_64
@@ -2027,6 +2029,13 @@  static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		else
 			vmx->pt_desc.guest.addr_a[index / 2] = data;
 		break;
+	case MSR_MISC_FEATURES_ENABLES:
+		if (kvm_misc_features_enables_msr_invalid(vcpu, data))
+			return 1;
+		if (vmx->loaded_cpu_state && this_cpu_read(msr_misc_features_shadow))
+			wrmsrl(MSR_MISC_FEATURES_ENABLES, data);
+		vcpu->arch.msr_misc_features_enables = data;
+		break;
 	case MSR_TSC_AUX:
 		if (!msr_info->host_initiated &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ad1df965574e..749aa4c9437a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2449,6 +2449,13 @@  static void record_steal_time(struct kvm_vcpu *vcpu)
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 }
 
+bool kvm_misc_features_enables_msr_invalid(struct kvm_vcpu *vcpu, u64 data)
+{
+	return (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT) ||
+	       (data && !supports_cpuid_fault(vcpu));
+}
+EXPORT_SYMBOL_GPL(kvm_misc_features_enables_msr_invalid);
+
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	bool pr = false;
@@ -2669,9 +2676,7 @@  int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vcpu->arch.msr_platform_info = data;
 		break;
 	case MSR_MISC_FEATURES_ENABLES:
-		if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT ||
-		    (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
-		     !supports_cpuid_fault(vcpu)))
+		if (kvm_misc_features_enables_msr_invalid(vcpu, data))
 			return 1;
 		vcpu->arch.msr_misc_features_enables = data;
 		break;