@@ -276,8 +276,8 @@ u32 svm_msrpm_offset(u32 msr)
offset = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
offset += (i * MSRS_RANGE_SIZE); /* add range offset */
- /* Now we have the u8 offset - but need the u32 offset */
- return offset / 4;
+ /* Now we have the u8 offset - but need the ulong offset */
+ return offset / sizeof(unsigned long);
}
/* MSR not in any range */
@@ -799,9 +799,9 @@ static bool valid_msr_intercept(u32 index)
static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
{
u8 bit_write;
+ unsigned long *msrpm;
unsigned long tmp;
u32 offset;
- u32 *msrpm;
/*
* For non-nested case:
@@ -824,7 +824,7 @@ static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
return test_bit(bit_write, &tmp);
}
-static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
+static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, unsigned long *msrpm,
u32 msr, int read, int write)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -861,18 +861,18 @@ static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
svm->nested.force_msr_bitmap_recalc = true;
}
-void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
+void set_msr_interception(struct kvm_vcpu *vcpu, unsigned long *msrpm, u32 msr,
int read, int write)
{
set_shadow_msr_intercept(vcpu, msr, read, write);
set_msr_interception_bitmap(vcpu, msrpm, msr, read, write);
}
-u32 *svm_vcpu_alloc_msrpm(void)
+unsigned long *svm_vcpu_alloc_msrpm(void)
{
unsigned int order = get_order(MSRPM_SIZE);
struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, order);
- u32 *msrpm;
+ unsigned long *msrpm;
if (!pages)
return NULL;
@@ -883,7 +883,7 @@ u32 *svm_vcpu_alloc_msrpm(void)
return msrpm;
}
-void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm)
+void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, unsigned long *msrpm)
{
int i;
@@ -917,7 +917,7 @@ void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
svm->x2avic_msrs_intercepted = intercept;
}
-void svm_vcpu_free_msrpm(u32 *msrpm)
+void svm_vcpu_free_msrpm(unsigned long *msrpm)
{
__free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE));
}
@@ -185,7 +185,7 @@ struct svm_nested_state {
u64 last_vmcb12_gpa;
/* These are the merged vectors */
- u32 *msrpm;
+ unsigned long *msrpm;
/* A VMRUN has started but has not yet been performed, so
* we cannot inject a nested vmexit yet. */
@@ -266,7 +266,7 @@ struct vcpu_svm {
*/
u64 virt_spec_ctrl;
- u32 *msrpm;
+ unsigned long *msrpm;
ulong nmi_iret_rip;
@@ -596,9 +596,9 @@ static inline bool is_vnmi_enabled(struct vcpu_svm *svm)
extern bool dump_invalid_vmcb;
u32 svm_msrpm_offset(u32 msr);
-u32 *svm_vcpu_alloc_msrpm(void);
-void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
-void svm_vcpu_free_msrpm(u32 *msrpm);
+unsigned long *svm_vcpu_alloc_msrpm(void);
+void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, unsigned long *msrpm);
+void svm_vcpu_free_msrpm(unsigned long *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
void svm_enable_lbrv(struct kvm_vcpu *vcpu);
void svm_update_lbrv(struct kvm_vcpu *vcpu);
@@ -612,7 +612,7 @@ bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
-void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
+void set_msr_interception(struct kvm_vcpu *vcpu, unsigned long *msrpm, u32 msr,
int read, int write);
void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool disable);
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
Use "unsigned long" instead of "u32" to track MSRPM to match the bitmap API.

Co-developed-by: Aaron Lewis <aaronlewis@google.com>
Signed-off-by: Aaron Lewis <aaronlewis@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/svm/svm.c | 18 +++++++++---------
 arch/x86/kvm/svm/svm.h | 12 ++++++------
 2 files changed, 15 insertions(+), 15 deletions(-)