Rename "find_msr_entry" to scope it to VMX and to associate it with
guest_uret_msrs.  Drop the "entry" so that the function name pairs with
the existing __vmx_find_uret_msr(), which intentionally uses a double
underscore prefix instead of appending "index" or "slot" as those names
are already claimed by other pieces of the user return MSR stack.

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/vmx/nested.c |  2 +-
 arch/x86/kvm/vmx/vmx.c    | 10 +++++-----
 arch/x86/kvm/vmx/vmx.h    |  2 +-
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4271,7 +4271,7 @@ static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
 			return vmx->msr_autoload.guest.val[i].value;
 	}
 
-	efer_msr = find_msr_entry(vmx, MSR_EFER);
+	efer_msr = vmx_find_uret_msr(vmx, MSR_EFER);
 	if (efer_msr)
 		return efer_msr->data;
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -633,7 +633,7 @@ static inline int __vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
 	return -1;
 }
 
-struct vmx_uret_msr *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
+struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
 {
 	int i;
 
@@ -1927,7 +1927,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		goto find_uret_msr;
 	default:
 	find_uret_msr:
-		msr = find_msr_entry(vmx, msr_info->index);
+		msr = vmx_find_uret_msr(vmx, msr_info->index);
 		if (msr) {
 			msr_info->data = msr->data;
 			break;
@@ -2201,7 +2201,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		goto find_uret_msr;
 	default:
 	find_uret_msr:
-		msr = find_msr_entry(vmx, msr_index);
+		msr = vmx_find_uret_msr(vmx, msr_index);
 		if (msr)
 			ret = vmx_set_guest_msr(vmx, msr, data);
 		else
@@ -2833,7 +2833,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	struct vmx_uret_msr *msr = find_msr_entry(vmx, MSR_EFER);
+	struct vmx_uret_msr *msr = vmx_find_uret_msr(vmx, MSR_EFER);
 
 	if (!msr)
 		return;
@@ -7237,7 +7237,7 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	if (boot_cpu_has(X86_FEATURE_RTM)) {
 		struct vmx_uret_msr *msr;
 
-		msr = find_msr_entry(vmx, MSR_IA32_TSX_CTRL);
+		msr = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
 		if (msr) {
 			bool enabled = guest_cpuid_has(vcpu, X86_FEATURE_RTM);
 			vmx_set_guest_msr(vmx, msr, enabled ? 0 : TSX_CTRL_RTM_DISABLE);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -350,7 +350,7 @@ bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
 bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
 void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
 void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
-struct vmx_uret_msr *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
+struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
 void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
 void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
 int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
Rename "find_msr_entry" to scope it to VMX and to associate it with guest_uret_msrs. Drop the "entry" so that the function name pairs with the existing __vmx_find_uret_msr(), which intentionally uses a double underscore prefix instead of appending "index" or "slot" as those names are already claimed by other pieces of the user return MSR stack. No functional change intended. Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> --- arch/x86/kvm/vmx/nested.c | 2 +- arch/x86/kvm/vmx/vmx.c | 10 +++++----- arch/x86/kvm/vmx/vmx.h | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-)