| Message ID | 20200923180409.32255-4-sean.j.christopherson@intel.com (mailing list archive) |
|---|---|
| State | New, archived |
| Series | KVM: x86: VMX: Fix MSR namespacing |
On 23/09/20 20:03, Sean Christopherson wrote:
> Add "loadstore" to vmx_find_msr_index() to differentiate it from the
> so-called shared MSR helpers (which will soon be renamed), and replace
> "index" with "slot" to better convey that the helper returns the slot in
> the array, not the MSR index (the value that gets stuffed into ECX).
>
> No functional change intended.

"slot" is definitely better; I'll adjust SVM to use it too.

Paolo
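For context on the rename being discussed: the helper's body is untouched by this patch (the diff below only shows its signature line). As it exists in vmx.c, it is essentially a linear scan over the load/store array, which is why the return value is a position in that array rather than an MSR index:

int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr)
{
	unsigned int i;

	/* Return the array slot whose entry targets the given MSR index. */
	for (i = 0; i < m->nr; ++i) {
		if (m->val[i].index == msr)
			return i;
	}
	return -ENOENT;
}

A negative return value thus means "not in the list", which is exactly how the callers in the diff below test it ("if (i < 0)").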
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index f818a406302a..87e5d606582e 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -938,11 +938,11 @@ static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
 	 * VM-exit in L0, use the more accurate value.
 	 */
 	if (msr_index == MSR_IA32_TSC) {
-		int index = vmx_find_msr_index(&vmx->msr_autostore.guest,
-					       MSR_IA32_TSC);
+		int i = vmx_find_loadstore_msr_slot(&vmx->msr_autostore.guest,
+						    MSR_IA32_TSC);
 
-		if (index >= 0) {
-			u64 val = vmx->msr_autostore.guest.val[index].value;
+		if (i >= 0) {
+			u64 val = vmx->msr_autostore.guest.val[i].value;
 
 			*data = kvm_read_l1_tsc(vcpu, val);
 			return true;
@@ -1031,12 +1031,12 @@ static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct vmx_msrs *autostore = &vmx->msr_autostore.guest;
 	bool in_vmcs12_store_list;
-	int msr_autostore_index;
+	int msr_autostore_slot;
 	bool in_autostore_list;
 	int last;
 
-	msr_autostore_index = vmx_find_msr_index(autostore, msr_index);
-	in_autostore_list = msr_autostore_index >= 0;
+	msr_autostore_slot = vmx_find_loadstore_msr_slot(autostore, msr_index);
+	in_autostore_list = msr_autostore_slot >= 0;
 	in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index);
 
 	if (in_vmcs12_store_list && !in_autostore_list) {
@@ -1057,7 +1057,7 @@ static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
 		autostore->val[last].index = msr_index;
 	} else if (!in_vmcs12_store_list && in_autostore_list) {
 		last = --autostore->nr;
-		autostore->val[msr_autostore_index] = autostore->val[last];
+		autostore->val[msr_autostore_slot] = autostore->val[last];
 	}
 }
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index e99f3bbfa6e9..35291fd90ca0 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -824,7 +824,7 @@ static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
 	vm_exit_controls_clearbit(vmx, exit);
 }
 
-int vmx_find_msr_index(struct vmx_msrs *m, u32 msr)
+int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr)
 {
 	unsigned int i;
 
@@ -858,7 +858,7 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 		}
 		break;
 	}
-	i = vmx_find_msr_index(&m->guest, msr);
+	i = vmx_find_loadstore_msr_slot(&m->guest, msr);
 	if (i < 0)
 		goto skip_guest;
 	--m->guest.nr;
@@ -866,7 +866,7 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
 
 skip_guest:
-	i = vmx_find_msr_index(&m->host, msr);
+	i = vmx_find_loadstore_msr_slot(&m->host, msr);
 	if (i < 0)
 		return;
 
@@ -925,9 +925,9 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
 	}
 
-	i = vmx_find_msr_index(&m->guest, msr);
+	i = vmx_find_loadstore_msr_slot(&m->guest, msr);
 	if (!entry_only)
-		j = vmx_find_msr_index(&m->host, msr);
+		j = vmx_find_loadstore_msr_slot(&m->host, msr);
 
 	if ((i < 0 && m->guest.nr == MAX_NR_LOADSTORE_MSRS) ||
 	    (j < 0 && m->host.nr == MAX_NR_LOADSTORE_MSRS)) {
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 9a418c274880..26887082118d 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -353,7 +353,7 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
 struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
 void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
 void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
-int vmx_find_msr_index(struct vmx_msrs *m, u32 msr);
+int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
 void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
 
 #define POSTED_INTR_ON 0
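An aside on the last nested.c hunk: removal from the autostore list uses a swap-with-last idiom, so "slot" also signals that positions in the array are not stable across removals. A minimal standalone sketch of that pattern (hypothetical helper name, not code from the patch):

static void vmx_del_loadstore_msr_slot(struct vmx_msrs *m, int slot)
{
	int last = --m->nr;

	/* Entry order is not preserved: the last entry refills the hole. */
	m->val[slot] = m->val[last];
}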
Add "loadstore" to vmx_find_msr_index() to differentiate it from the so called shared MSRs helpers (which will soon be renamed), and replace "index" with "slot" to better convey that the helper returns slot in the array, not the MSR index (the value that gets stuffed into ECX). No functional change intended. Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> --- arch/x86/kvm/vmx/nested.c | 16 ++++++++-------- arch/x86/kvm/vmx/vmx.c | 10 +++++----- arch/x86/kvm/vmx/vmx.h | 2 +- 3 files changed, 14 insertions(+), 14 deletions(-)