@@ -87,6 +87,7 @@
#define KVM_REQ_HV_TLB_FLUSH \
KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_APF_READY KVM_ARCH_REQ(28)
+#define KVM_REQ_USER_MSR_UPDATE KVM_ARCH_REQ(29)
#define CR0_RESERVED_BITS \
(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
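The new define claims arch request number 29 for deferred MSR-bitmap fixups. This is KVM's standard deferred-work mechanism: a producer raises the bit with kvm_make_request() and the entry path consumes it with kvm_check_request(), which tests and clears the bit before the guest is resumed. A minimal sketch of the pairing this patch sets up (the real producer and consumer are in the svm.c/vmx.c and vcpu_enter_guest() hunks below):

	/* producer: any path that opens up an MSR intercept */
	kvm_make_request(KVM_REQ_USER_MSR_UPDATE, vcpu);

	/* consumer: vcpu_enter_guest(), before resuming the guest */
	if (kvm_check_request(KVM_REQ_USER_MSR_UPDATE, vcpu))
		kvm_set_user_msr_intercepts(vcpu);
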
@@ -1271,6 +1272,8 @@ struct kvm_x86_ops {
int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu);
void (*migrate_timers)(struct kvm_vcpu *vcpu);
+
+ void (*set_user_msr_intercept)(struct kvm_vcpu *vcpu, u32 msr);
};
struct kvm_x86_nested_ops {
@@ -583,13 +583,27 @@ static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
return !!test_bit(bit_write, &tmp);
}
+static void __set_msr_interception(u32 *msrpm, u32 msr, int read, int write,
+ u32 offset)
+{
+ u8 bit_read, bit_write;
+ unsigned long tmp;
+
+ bit_read = 2 * (msr & 0x0f);
+ bit_write = 2 * (msr & 0x0f) + 1;
+ tmp = msrpm[offset];
+
+ read ? clear_bit(bit_read, &tmp) : set_bit(bit_read, &tmp);
+ write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);
+
+ msrpm[offset] = tmp;
+}
+
static void set_msr_interception(struct kvm_vcpu *vcpu, u32 msr, int read,
int write)
{
struct vcpu_svm *svm = to_svm(vcpu);
u32 *msrpm = svm->msrpm;
- u8 bit_read, bit_write;
- unsigned long tmp;
u32 offset;
/*
@@ -598,17 +612,30 @@ static void set_msr_interception(struct kvm_vcpu *vcpu, u32 msr, int read,
*/
WARN_ON(!valid_msr_intercept(msr));
- offset = svm_msrpm_offset(msr);
- bit_read = 2 * (msr & 0x0f);
- bit_write = 2 * (msr & 0x0f) + 1;
- tmp = msrpm[offset];
-
+ offset = svm_msrpm_offset(msr);
BUG_ON(offset == MSR_INVALID);
- read ? clear_bit(bit_read, &tmp) : set_bit(bit_read, &tmp);
- write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);
+ __set_msr_interception(msrpm, msr, read, write, offset);
- msrpm[offset] = tmp;
+ if (read || write)
+ kvm_make_request(KVM_REQ_USER_MSR_UPDATE, vcpu);
+}
+
+static void set_user_msr_interception(struct kvm_vcpu *vcpu, u32 msr, int read,
+ int write)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ u32 *msrpm = svm->msrpm;
+ u32 offset;
+
+ offset = svm_msrpm_offset(msr);
+ if (offset != MSR_INVALID)
+ __set_msr_interception(msrpm, msr, read, write, offset);
+}
+
+void svm_set_user_msr_intercept(struct kvm_vcpu *vcpu, u32 msr)
+{
+ set_user_msr_interception(vcpu, msr, 0, 0);
}
static void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm)
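For readers unfamiliar with the SVM MSR permission map: every MSR owns two adjacent bits, bit 2n for the read intercept and bit 2n+1 for the write intercept, so each 32-bit word covers 16 MSRs. That is where the 2 * (msr & 0x0f) arithmetic in __set_msr_interception() comes from, while svm_msrpm_offset() selects the word and returns MSR_INVALID for MSRs outside the architected ranges. A short worked sketch, not part of the patch, assuming svm points at the current vcpu_svm:

	u32 msr = MSR_STAR;			/* 0xc0000081 */
	u32 offset = svm_msrpm_offset(msr);	/* u32 index into svm->msrpm */
	u8 bit_read = 2 * (msr & 0x0f);		/* 2 * 0x1 = 2 */
	u8 bit_write = bit_read + 1;		/* 3 */

	/* read = write = 1 clears both bits, i.e. passes the MSR through */
	if (offset != MSR_INVALID)
		__set_msr_interception(svm->msrpm, msr, 1, 1, offset);
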
@@ -4088,6 +4115,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
.apic_init_signal_blocked = svm_apic_init_signal_blocked,
+
+ .set_user_msr_intercept = svm_set_user_msr_intercept,
};
static struct kvm_x86_init_ops svm_init_ops __initdata = {
@@ -3728,6 +3728,10 @@ static __always_inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
__clear_bit(msr, msr_bitmap + 0xc00 / f);
}
+
+ if (type & MSR_TYPE_R || type & MSR_TYPE_W) {
+ kvm_make_request(KVM_REQ_USER_MSR_UPDATE, vcpu);
+ }
}
static __always_inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
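The request is raised whenever a read or write intercept is being disabled, i.e. whenever an MSR is about to be passed through to the guest. For context, the VMX MSR bitmap indexed here is a 4-KByte page split into four 1-KByte regions with one bit per MSR, and a clear bit means the access is not intercepted; the 0x000/0x400/0x800/0xc00 offsets in the surrounding helpers select the region. A sketch of the indexing, assuming msr_bitmap points at that page (this mirrors the existing enable/disable helpers, it is not new behaviour):

	int f = sizeof(unsigned long);

	/*
	 *   0x000: reads,  MSRs 0x00000000 - 0x00001fff
	 *   0x400: reads,  MSRs 0xc0000000 - 0xc0001fff
	 *   0x800: writes, MSRs 0x00000000 - 0x00001fff
	 *   0xc00: writes, MSRs 0xc0000000 - 0xc0001fff
	 */
	if (msr <= 0x1fff)
		__set_bit(msr, msr_bitmap + 0x800 / f);		 /* intercept writes, low range  */
	else if (msr >= 0xc0000000 && msr <= 0xc0001fff)
		__set_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f); /* intercept writes, high range */
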
@@ -3795,7 +3799,7 @@ static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
}
static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu,
- unsigned long *msr_bitmap, u8 mode)
+ unsigned long *msr_bitmap, u8 mode)
{
int msr;
@@ -3819,6 +3823,11 @@ static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu,
}
}
+void vmx_set_user_msr_intercept(struct kvm_vcpu *vcpu, u32 msr)
+{
+ vmx_enable_intercept_for_msr(vcpu, msr, MSR_TYPE_RW);
+}
+
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -7965,6 +7974,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
.apic_init_signal_blocked = vmx_apic_init_signal_blocked,
.migrate_timers = vmx_migrate_timers,
+
+ .set_user_msr_intercept = vmx_set_user_msr_intercept,
};
static __init int hardware_setup(void)
@@ -3555,6 +3555,19 @@ bool kvm_msr_user_exit(struct kvm *kvm, u32 index)
}
EXPORT_SYMBOL_GPL(kvm_msr_user_exit);
+static void kvm_set_user_msr_intercepts(struct kvm_vcpu *vcpu)
+{
+ struct kvm_msr_list *msr_list = vcpu->kvm->arch.user_exit_msrs;
+ u32 i, msr;
+
+ if (msr_list) {
+ for (i = 0; i < msr_list->nmsrs; i++) {
+ msr = msr_list->indices[i];
+ kvm_x86_ops.set_user_msr_intercept(vcpu, msr);
+ }
+ }
+}
+
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
int r = 0;
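kvm_set_user_msr_intercepts() walks kvm->arch.user_exit_msrs, the list of MSR indices userspace registered earlier in this series, and re-enables interception for each one through the new vendor hook. kvm_msr_user_exit(), whose tail is visible above, presumably does the matching lookup on the same list when an intercepted access arrives; its body is not part of this excerpt, but a sketch of the assumed shape looks like this (struct kvm_msr_list is the uapi type carrying nmsrs and indices[]):

	bool kvm_msr_user_exit(struct kvm *kvm, u32 index)
	{
		struct kvm_msr_list *msr_list = kvm->arch.user_exit_msrs;
		u32 i;

		if (!msr_list)
			return false;

		for (i = 0; i < msr_list->nmsrs; i++) {
			if (msr_list->indices[i] == index)
				return true;
		}

		return false;
	}
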
@@ -8583,6 +8596,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_vcpu_update_apicv(vcpu);
if (kvm_check_request(KVM_REQ_APF_READY, vcpu))
kvm_check_async_pf_completion(vcpu);
+
+ if (kvm_check_request(KVM_REQ_USER_MSR_UPDATE, vcpu))
+ kvm_set_user_msr_intercepts(vcpu);
}
if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {