diff mbox series

[v2,3/8] KVM: x86: Allow em_{rdmsr,wrmsr} to bounce to userspace

Message ID 20200810201134.2031613-4-aaronlewis@google.com (mailing list archive)
State New, archived
Headers show
Series Allow userspace to manage MSRs | expand

Commit Message

Aaron Lewis Aug. 10, 2020, 8:11 p.m. UTC
Refactor em_{rdmsr,wrmsr} to allow MSR accesses to bounce to userspace
when the MSR exists in the list of MSRs userspace has requested to
manage with the ioctl KVM_SET_EXIT_MSRS.

Signed-off-by: Aaron Lewis <aaronlewis@google.com>
Based-on-patch-by: Alexander Graf <graf@amazon.com>
---
 arch/x86/kvm/emulate.c | 18 +++++++++--
 arch/x86/kvm/x86.c     | 70 ++++++++++++++++++++++++++++++------------
 2 files changed, 66 insertions(+), 22 deletions(-)
diff mbox series

Patch

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index d0e2825ae617..744ab9c92b73 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -3689,11 +3689,18 @@  static int em_dr_write(struct x86_emulate_ctxt *ctxt)
 
 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
 {
+	u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
 	u64 msr_data;
+	int r;
 
 	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
 		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
-	if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
+	r = ctxt->ops->set_msr(ctxt, msr_index, msr_data);
+
+	if (r == X86EMUL_IO_NEEDED)
+		return r;
+
+	if (r)
 		return emulate_gp(ctxt, 0);
 
 	return X86EMUL_CONTINUE;
@@ -3701,9 +3708,16 @@  static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
 
 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
 {
+	u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
 	u64 msr_data;
+	int r;
+
+	r = ctxt->ops->get_msr(ctxt, msr_index, &msr_data);
+
+	if (r == X86EMUL_IO_NEEDED)
+		return r;
 
-	if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
+	if (r)
 		return emulate_gp(ctxt, 0);
 
 	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 47619b49818a..4dff6147557e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1590,21 +1590,46 @@  static int complete_userspace_wrmsr(struct kvm_vcpu *vcpu)
 	return complete_userspace_msr(vcpu, true);
 }
 
+static int kvm_get_msr_user(struct kvm_vcpu *vcpu, u32 index)
+{
+	if (!kvm_msr_user_exit(vcpu->kvm, index))
+		return 0;
+
+	vcpu->run->exit_reason = KVM_EXIT_X86_RDMSR;
+	vcpu->run->msr.index = index;
+	vcpu->run->msr.data = 0;
+	vcpu->run->msr.inject_gp = 0;
+	memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad));
+	vcpu->arch.complete_userspace_io =
+		complete_userspace_rdmsr;
+
+	return 1;
+}
+
+static int kvm_set_msr_user(struct kvm_vcpu *vcpu, u32 index, u64 data)
+{
+	if (!kvm_msr_user_exit(vcpu->kvm, index))
+		return 0;
+
+	vcpu->run->exit_reason = KVM_EXIT_X86_WRMSR;
+	vcpu->run->msr.index = index;
+	vcpu->run->msr.data = data;
+	vcpu->run->msr.inject_gp = 0;
+	memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad));
+	vcpu->arch.complete_userspace_io =
+		complete_userspace_wrmsr;
+
+	return 1;
+}
+
 int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
 {
 	u32 ecx = kvm_rcx_read(vcpu);
 	u64 data;
 
-	if (kvm_msr_user_exit(vcpu->kvm, ecx)) {
-		vcpu->run->exit_reason = KVM_EXIT_X86_RDMSR;
-		vcpu->run->msr.index = ecx;
-		vcpu->run->msr.data = 0;
-		vcpu->run->msr.inject_gp = 0;
-		memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad));
-		vcpu->arch.complete_userspace_io =
-			complete_userspace_rdmsr;
+	if (kvm_get_msr_user(vcpu, ecx))
+		/* Bounce to user space */
 		return 0;
-	}
 
 	if (kvm_get_msr(vcpu, ecx, &data)) {
 		trace_kvm_msr_read_ex(ecx);
@@ -1625,16 +1650,9 @@  int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
 	u32 ecx = kvm_rcx_read(vcpu);
 	u64 data = kvm_read_edx_eax(vcpu);
 
-	if (kvm_msr_user_exit(vcpu->kvm, ecx)) {
-		vcpu->run->exit_reason = KVM_EXIT_X86_WRMSR;
-		vcpu->run->msr.index = ecx;
-		vcpu->run->msr.data = data;
-		vcpu->run->msr.inject_gp = 0;
-		memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad));
-		vcpu->arch.complete_userspace_io =
-			complete_userspace_wrmsr;
+	if (kvm_set_msr_user(vcpu, ecx, data))
+		/* Bounce to user space */
 		return 0;
-	}
 
 	if (kvm_set_msr(vcpu, ecx, data)) {
 		trace_kvm_msr_write_ex(ecx, data);
@@ -6442,13 +6460,25 @@  static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
 			    u32 msr_index, u64 *pdata)
 {
-	return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
+	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
+	if (kvm_get_msr_user(vcpu, msr_index))
+		/* Bounce to user space */
+		return X86EMUL_IO_NEEDED;
+
+	return kvm_get_msr(vcpu, msr_index, pdata);
 }
 
 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
 			    u32 msr_index, u64 data)
 {
-	return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
+	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
+	if (kvm_set_msr_user(vcpu, msr_index, data))
+		/* Bounce to user space */
+		return X86EMUL_IO_NEEDED;
+
+	return kvm_set_msr(vcpu, msr_index, data);
 }
 
 static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt)