
[15/15] KVM: x86: Hoist VMX MSR intercepts to common x86 code

Message ID 20241127201929.4005605-16-aaronlewis@google.com
State New
Series Unify MSR intercepts in x86

Commit Message

Aaron Lewis Nov. 27, 2024, 8:19 p.m. UTC
Complete the transition of unifying the MSR intercepts for x86 by
hoisting the VMX implementation to common x86 code.

The only new addition to the common implementation over what SVM
already contributed is the is_valid_passthrough_msr() check, which
VMX uses to disallow certain MSRs from being treated as possible
passthrough MSRs.  To distinguish MSRs that are not valid from MSRs
that are simply missing from the list, kvm_passthrough_msr_slot()
returns -EINVAL for MSRs that are not allowed to be in the list and
-ENOENT for MSRs that are expected to be in the list but aren't.  KVM
warns in the latter case.
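
As an illustration of those return values (a sketch only; the caller
below is hypothetical, is not part of this patch, and simply mirrors
the shadow-bitmap logic this patch removes from vmx.c):

/*
 * Illustrative sketch, not part of this patch: a possible common-code
 * consumer of kvm_passthrough_msr_slot().  The real common helper is
 * introduced earlier in the series.
 */
static void example_clear_shadow_intercept(struct kvm_vcpu *vcpu, u32 msr,
					   int type)
{
	int slot = kvm_passthrough_msr_slot(msr);

	if (slot == -EINVAL)
		return;		/* MSR may never be passed through. */

	if (slot < 0)
		return;		/* -ENOENT: expected in the list; KVM has warned. */

	if (type & MSR_TYPE_R)
		__clear_bit(slot, vcpu->arch.shadow_msr_intercept.read);
	if (type & MSR_TYPE_W)
		__clear_bit(slot, vcpu->arch.shadow_msr_intercept.write);
}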

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Aaron Lewis <aaronlewis@google.com>
---
 arch/x86/include/asm/kvm-x86-ops.h |  1 +
 arch/x86/include/asm/kvm_host.h    |  1 +
 arch/x86/kvm/svm/svm.c             |  6 ++
 arch/x86/kvm/vmx/main.c            |  2 +
 arch/x86/kvm/vmx/vmx.c             | 91 +++++++++---------------------
 arch/x86/kvm/vmx/vmx.h             |  4 ++
 arch/x86/kvm/x86.c                 |  4 ++
 7 files changed, 45 insertions(+), 64 deletions(-)

Patch

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 3f10ce4957f74..db1e0fc002805 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -134,6 +134,7 @@  KVM_X86_OP_OPTIONAL(migrate_timers)
 KVM_X86_OP_OPTIONAL(msr_filter_changed)
 KVM_X86_OP_OPTIONAL(get_msr_bitmap_entries)
 KVM_X86_OP(disable_intercept_for_msr)
+KVM_X86_OP(is_valid_passthrough_msr)
 KVM_X86_OP(complete_emulated_msr)
 KVM_X86_OP(vcpu_deliver_sipi_vector)
 KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 763fc054a2c56..22ae4dfa94f2c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1834,6 +1834,7 @@  struct kvm_x86_ops {
 				       unsigned long **read_map, u8 *read_bit,
 				       unsigned long **write_map, u8 *write_bit);
 	void (*disable_intercept_for_msr)(struct kvm_vcpu *vcpu, u32 msr, int type);
+	bool (*is_valid_passthrough_msr)(u32 msr);
 	void (*msr_filter_changed)(struct kvm_vcpu *vcpu);
 	int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);
 
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index aaf244e233b90..2e746abeda215 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -799,6 +799,11 @@  static void svm_get_msr_bitmap_entries(struct kvm_vcpu *vcpu, u32 msr,
 	*write_map = &svm->msrpm[offset];
 }
 
+static bool svm_is_valid_passthrough_msr(u32 msr)
+{
+	return true;
+}
+
 void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 {
 	kvm_disable_intercept_for_msr(vcpu, msr, type);
@@ -5065,6 +5070,7 @@  static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.nr_possible_passthrough_msrs = ARRAY_SIZE(direct_access_msrs),
 	.get_msr_bitmap_entries = svm_get_msr_bitmap_entries,
 	.disable_intercept_for_msr = svm_disable_intercept_for_msr,
+	.is_valid_passthrough_msr = svm_is_valid_passthrough_msr,
 	.complete_emulated_msr = svm_complete_emulated_msr,
 
 	.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index 5279c82648fe6..e89c472179dd5 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -179,7 +179,9 @@  struct kvm_x86_ops vt_x86_ops __initdata = {
 
 	.possible_passthrough_msrs = vmx_possible_passthrough_msrs,
 	.nr_possible_passthrough_msrs = ARRAY_SIZE(vmx_possible_passthrough_msrs),
+	.get_msr_bitmap_entries = vmx_get_msr_bitmap_entries,
 	.disable_intercept_for_msr = vmx_disable_intercept_for_msr,
+	.is_valid_passthrough_msr = vmx_is_valid_passthrough_msr,
 	.msr_filter_changed = vmx_msr_filter_changed,
 	.complete_emulated_msr = kvm_complete_insn_gp,
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4cb3e9a8df2c0..5493a24febd50 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -642,14 +642,12 @@  static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
 	return flexpriority_enabled && lapic_in_kernel(vcpu);
 }
 
-static int vmx_get_passthrough_msr_slot(u32 msr)
+bool vmx_is_valid_passthrough_msr(u32 msr)
 {
-	int r;
-
 	switch (msr) {
 	case 0x800 ... 0x8ff:
 		/* x2APIC MSRs. These are handled in vmx_update_msr_bitmap_x2apic() */
-		return -ENOENT;
+		return false;
 	case MSR_IA32_RTIT_STATUS:
 	case MSR_IA32_RTIT_OUTPUT_BASE:
 	case MSR_IA32_RTIT_OUTPUT_MASK:
@@ -664,13 +662,10 @@  static int vmx_get_passthrough_msr_slot(u32 msr)
 	case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
 	case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
 		/* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
-		return -ENOENT;
+		return false;
 	}
 
-	r = kvm_passthrough_msr_slot(msr);
-
-	WARN(!r, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);
-	return r;
+	return true;
 }
 
 struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
@@ -3969,76 +3964,44 @@  static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx)
 	vmx->nested.force_msr_bitmap_recalc = true;
 }
 
-void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
+void vmx_get_msr_bitmap_entries(struct kvm_vcpu *vcpu, u32 msr,
+				unsigned long **read_map, u8 *read_bit,
+				unsigned long **write_map, u8 *write_bit)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
-	int idx;
-
-	if (!cpu_has_vmx_msr_bitmap())
-		return;
+	unsigned long *bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
+	u32 offset;
 
-	vmx_msr_bitmap_l01_changed(vmx);
+	*read_bit = *write_bit = msr & 0x1fff;
 
-	/*
-	 * Mark the desired intercept state in shadow bitmap, this is needed
-	 * for resync when the MSR filters change.
-	 */
-	idx = vmx_get_passthrough_msr_slot(msr);
-	if (idx >= 0) {
-		if (type & MSR_TYPE_R)
-			__clear_bit(idx, vcpu->arch.shadow_msr_intercept.read);
-		if (type & MSR_TYPE_W)
-			__clear_bit(idx, vcpu->arch.shadow_msr_intercept.write);
-	}
+	if (msr <= 0x1fff)
+		offset = 0;
+	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
+		offset = 0x400;
+	else
+		BUG();
 
-	if ((type & MSR_TYPE_R) &&
-	    !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ)) {
-		vmx_set_msr_bitmap_read(msr_bitmap, msr);
-		type &= ~MSR_TYPE_R;
-	}
+	*read_map = bitmap + (0 + offset) / sizeof(unsigned long);
+	*write_map = bitmap + (0x800 + offset) / sizeof(unsigned long);
+}
 
-	if ((type & MSR_TYPE_W) &&
-	    !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE)) {
-		vmx_set_msr_bitmap_write(msr_bitmap, msr);
-		type &= ~MSR_TYPE_W;
-	}
+void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
+{
+	if (!cpu_has_vmx_msr_bitmap())
+		return;
 
-	if (type & MSR_TYPE_R)
-		vmx_clear_msr_bitmap_read(msr_bitmap, msr);
+	kvm_disable_intercept_for_msr(vcpu, msr, type);
 
-	if (type & MSR_TYPE_W)
-		vmx_clear_msr_bitmap_write(msr_bitmap, msr);
+	vmx_msr_bitmap_l01_changed(to_vmx(vcpu));
 }
 
 void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
-	int idx;
-
 	if (!cpu_has_vmx_msr_bitmap())
 		return;
 
-	vmx_msr_bitmap_l01_changed(vmx);
-
-	/*
-	 * Mark the desired intercept state in shadow bitmap, this is needed
-	 * for resync when the MSR filter changes.
-	 */
-	idx = vmx_get_passthrough_msr_slot(msr);
-	if (idx >= 0) {
-		if (type & MSR_TYPE_R)
-			__set_bit(idx, vcpu->arch.shadow_msr_intercept.read);
-		if (type & MSR_TYPE_W)
-			__set_bit(idx, vcpu->arch.shadow_msr_intercept.write);
-	}
-
-	if (type & MSR_TYPE_R)
-		vmx_set_msr_bitmap_read(msr_bitmap, msr);
+	kvm_enable_intercept_for_msr(vcpu, msr, type);
 
-	if (type & MSR_TYPE_W)
-		vmx_set_msr_bitmap_write(msr_bitmap, msr);
+	vmx_msr_bitmap_l01_changed(to_vmx(vcpu));
 }
 
 static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index c40e7c880764f..6b87dcab46e48 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -409,8 +409,12 @@  bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
 int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
 void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
 
+void vmx_get_msr_bitmap_entries(struct kvm_vcpu *vcpu, u32 msr,
+				unsigned long **read_map, u8 *read_bit,
+				unsigned long **write_map, u8 *write_bit);
 void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
 void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
+bool vmx_is_valid_passthrough_msr(u32 msr);
 
 u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
 u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1e607a0eb58a0..3c4a580d51517 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1810,6 +1810,10 @@  int kvm_passthrough_msr_slot(u32 msr)
 {
 	u32 i;
 
+	if (!static_call(kvm_x86_is_valid_passthrough_msr)(msr)) {
+		return -EINVAL;
+	}
+
 	for (i = 0; i < kvm_x86_ops.nr_possible_passthrough_msrs; i++) {
 		if (kvm_x86_ops.possible_passthrough_msrs[i] == msr)
 			return i;