Message ID | 20240425181422.3250947-6-seanjc@google.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | KVM: x86: Clean up MSR access/failure handling | expand |
On 4/26/2024 2:14 AM, Sean Christopherson wrote: > Rename all APIs related to feature MSRs from get_feature_msr() to s /get_feature_msr()/get_msr_feature() > get_feature_msr(). The APIs get "feature MSRs", not "MSR features". > And unlike kvm_{g,s}et_msr_common(), the "feature" adjective doesn't > describe the helper itself. > > No functional change intended. > > Signed-off-by: Sean Christopherson <seanjc@google.com> > --- > arch/x86/include/asm/kvm-x86-ops.h | 2 +- > arch/x86/include/asm/kvm_host.h | 2 +- > arch/x86/kvm/svm/svm.c | 6 +++--- > arch/x86/kvm/vmx/main.c | 2 +- > arch/x86/kvm/vmx/vmx.c | 2 +- > arch/x86/kvm/vmx/x86_ops.h | 2 +- > arch/x86/kvm/x86.c | 12 ++++++------ > 7 files changed, 14 insertions(+), 14 deletions(-) > > diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h > index 5187fcf4b610..9f25b4a49d6b 100644 > --- a/arch/x86/include/asm/kvm-x86-ops.h > +++ b/arch/x86/include/asm/kvm-x86-ops.h > @@ -128,7 +128,7 @@ KVM_X86_OP_OPTIONAL(mem_enc_unregister_region) > KVM_X86_OP_OPTIONAL(vm_copy_enc_context_from) > KVM_X86_OP_OPTIONAL(vm_move_enc_context_from) > KVM_X86_OP_OPTIONAL(guest_memory_reclaimed) > -KVM_X86_OP(get_msr_feature) > +KVM_X86_OP(get_feature_msr) > KVM_X86_OP(check_emulate_instruction) > KVM_X86_OP(apic_init_signal_blocked) > KVM_X86_OP_OPTIONAL(enable_l2_tlb_flush) > diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h > index 7d56e5a52ae3..cc04ab0c234e 100644 > --- a/arch/x86/include/asm/kvm_host.h > +++ b/arch/x86/include/asm/kvm_host.h > @@ -1785,7 +1785,7 @@ struct kvm_x86_ops { > int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd); > void (*guest_memory_reclaimed)(struct kvm *kvm); > > - int (*get_msr_feature)(u32 msr, u64 *data); > + int (*get_feature_msr)(u32 msr, u64 *data); > > int (*check_emulate_instruction)(struct kvm_vcpu *vcpu, int emul_type, > void *insn, int insn_len); > diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c > 
index 15422b7d9149..d95cd230540d 100644 > --- a/arch/x86/kvm/svm/svm.c > +++ b/arch/x86/kvm/svm/svm.c > @@ -2796,7 +2796,7 @@ static int efer_trap(struct kvm_vcpu *vcpu) > return kvm_complete_insn_gp(vcpu, ret); > } > > -static int svm_get_msr_feature(u32 msr, u64 *data) > +static int svm_get_feature_msr(u32 msr, u64 *data) > { > *data = 0; > > @@ -3134,7 +3134,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) > case MSR_AMD64_DE_CFG: { > u64 supported_de_cfg; > > - if (svm_get_msr_feature(ecx, &supported_de_cfg)) > + if (svm_get_feature_msr(ecx, &supported_de_cfg)) > return 1; > > if (data & ~supported_de_cfg) > @@ -4944,7 +4944,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { > .vcpu_unblocking = avic_vcpu_unblocking, > > .update_exception_bitmap = svm_update_exception_bitmap, > - .get_msr_feature = svm_get_msr_feature, > + .get_feature_msr = svm_get_feature_msr, > .get_msr = svm_get_msr, > .set_msr = svm_set_msr, > .get_segment_base = svm_get_segment_base, > diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c > index 7c546ad3e4c9..c670f4cf6d94 100644 > --- a/arch/x86/kvm/vmx/main.c > +++ b/arch/x86/kvm/vmx/main.c > @@ -40,7 +40,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = { > .vcpu_put = vmx_vcpu_put, > > .update_exception_bitmap = vmx_update_exception_bitmap, > - .get_msr_feature = vmx_get_msr_feature, > + .get_feature_msr = vmx_get_feature_msr, > .get_msr = vmx_get_msr, > .set_msr = vmx_set_msr, > .get_segment_base = vmx_get_segment_base, > diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c > index 25b0a838abd6..fe2bf8f31d7c 100644 > --- a/arch/x86/kvm/vmx/vmx.c > +++ b/arch/x86/kvm/vmx/vmx.c > @@ -1955,7 +1955,7 @@ static inline bool is_vmx_feature_control_msr_valid(struct vcpu_vmx *vmx, > return !(msr->data & ~valid_bits); > } > > -int vmx_get_msr_feature(u32 msr, u64 *data) > +int vmx_get_feature_msr(u32 msr, u64 *data) > { > switch (msr) { > case KVM_FIRST_EMULATED_VMX_MSR ... 
KVM_LAST_EMULATED_VMX_MSR: > diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h > index 504d56d6837d..4b81c85e9357 100644 > --- a/arch/x86/kvm/vmx/x86_ops.h > +++ b/arch/x86/kvm/vmx/x86_ops.h > @@ -58,7 +58,7 @@ bool vmx_has_emulated_msr(struct kvm *kvm, u32 index); > void vmx_msr_filter_changed(struct kvm_vcpu *vcpu); > void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu); > void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu); > -int vmx_get_msr_feature(u32 msr, u64 *data); > +int vmx_get_feature_msr(u32 msr, u64 *data); > int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info); > u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg); > void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c > index 03e50812ab33..8f58181f2b6d 100644 > --- a/arch/x86/kvm/x86.c > +++ b/arch/x86/kvm/x86.c > @@ -1682,7 +1682,7 @@ static u64 kvm_get_arch_capabilities(void) > return data; > } > > -static int kvm_get_msr_feature(struct kvm_msr_entry *msr) > +static int kvm_get_feature_msr(struct kvm_msr_entry *msr) > { > switch (msr->index) { > case MSR_IA32_ARCH_CAPABILITIES: > @@ -1695,12 +1695,12 @@ static int kvm_get_msr_feature(struct kvm_msr_entry *msr) > rdmsrl_safe(msr->index, &msr->data); > break; > default: > - return static_call(kvm_x86_get_msr_feature)(msr->index, &msr->data); > + return static_call(kvm_x86_get_feature_msr)(msr->index, &msr->data); > } > return 0; > } > > -static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) > +static int do_get_feature_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) > { > struct kvm_msr_entry msr; > int r; > @@ -1708,7 +1708,7 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) > /* Unconditionally clear the output for simplicity */ > msr.data = 0; > msr.index = index; > - r = kvm_get_msr_feature(&msr); > + r = kvm_get_feature_msr(&msr); > > if (r == 
KVM_MSR_RET_UNSUPPORTED && kvm_msr_ignored_check(index, 0, false)) > r = 0; > @@ -4962,7 +4962,7 @@ long kvm_arch_dev_ioctl(struct file *filp, > break; > } > case KVM_GET_MSRS: > - r = msr_io(NULL, argp, do_get_msr_feature, 1); > + r = msr_io(NULL, argp, do_get_feature_msr, 1); > break; > #ifdef CONFIG_KVM_HYPERV > case KVM_GET_SUPPORTED_HV_CPUID: > @@ -7367,7 +7367,7 @@ static void kvm_probe_feature_msr(u32 msr_index) > .index = msr_index, > }; > > - if (kvm_get_msr_feature(&msr)) > + if (kvm_get_feature_msr(&msr)) > return; > > msr_based_features[num_msr_based_features++] = msr_index;
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 5187fcf4b610..9f25b4a49d6b 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -128,7 +128,7 @@ KVM_X86_OP_OPTIONAL(mem_enc_unregister_region) KVM_X86_OP_OPTIONAL(vm_copy_enc_context_from) KVM_X86_OP_OPTIONAL(vm_move_enc_context_from) KVM_X86_OP_OPTIONAL(guest_memory_reclaimed) -KVM_X86_OP(get_msr_feature) +KVM_X86_OP(get_feature_msr) KVM_X86_OP(check_emulate_instruction) KVM_X86_OP(apic_init_signal_blocked) KVM_X86_OP_OPTIONAL(enable_l2_tlb_flush) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 7d56e5a52ae3..cc04ab0c234e 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1785,7 +1785,7 @@ struct kvm_x86_ops { int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd); void (*guest_memory_reclaimed)(struct kvm *kvm); - int (*get_msr_feature)(u32 msr, u64 *data); + int (*get_feature_msr)(u32 msr, u64 *data); int (*check_emulate_instruction)(struct kvm_vcpu *vcpu, int emul_type, void *insn, int insn_len); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 15422b7d9149..d95cd230540d 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2796,7 +2796,7 @@ static int efer_trap(struct kvm_vcpu *vcpu) return kvm_complete_insn_gp(vcpu, ret); } -static int svm_get_msr_feature(u32 msr, u64 *data) +static int svm_get_feature_msr(u32 msr, u64 *data) { *data = 0; @@ -3134,7 +3134,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) case MSR_AMD64_DE_CFG: { u64 supported_de_cfg; - if (svm_get_msr_feature(ecx, &supported_de_cfg)) + if (svm_get_feature_msr(ecx, &supported_de_cfg)) return 1; if (data & ~supported_de_cfg) @@ -4944,7 +4944,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .vcpu_unblocking = avic_vcpu_unblocking, .update_exception_bitmap = svm_update_exception_bitmap, - .get_msr_feature = 
svm_get_msr_feature, + .get_feature_msr = svm_get_feature_msr, .get_msr = svm_get_msr, .set_msr = svm_set_msr, .get_segment_base = svm_get_segment_base, diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c index 7c546ad3e4c9..c670f4cf6d94 100644 --- a/arch/x86/kvm/vmx/main.c +++ b/arch/x86/kvm/vmx/main.c @@ -40,7 +40,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = { .vcpu_put = vmx_vcpu_put, .update_exception_bitmap = vmx_update_exception_bitmap, - .get_msr_feature = vmx_get_msr_feature, + .get_feature_msr = vmx_get_feature_msr, .get_msr = vmx_get_msr, .set_msr = vmx_set_msr, .get_segment_base = vmx_get_segment_base, diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 25b0a838abd6..fe2bf8f31d7c 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1955,7 +1955,7 @@ static inline bool is_vmx_feature_control_msr_valid(struct vcpu_vmx *vmx, return !(msr->data & ~valid_bits); } -int vmx_get_msr_feature(u32 msr, u64 *data) +int vmx_get_feature_msr(u32 msr, u64 *data) { switch (msr) { case KVM_FIRST_EMULATED_VMX_MSR ... 
KVM_LAST_EMULATED_VMX_MSR: diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h index 504d56d6837d..4b81c85e9357 100644 --- a/arch/x86/kvm/vmx/x86_ops.h +++ b/arch/x86/kvm/vmx/x86_ops.h @@ -58,7 +58,7 @@ bool vmx_has_emulated_msr(struct kvm *kvm, u32 index); void vmx_msr_filter_changed(struct kvm_vcpu *vcpu); void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu); void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu); -int vmx_get_msr_feature(u32 msr, u64 *data); +int vmx_get_feature_msr(u32 msr, u64 *data); int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info); u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg); void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 03e50812ab33..8f58181f2b6d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1682,7 +1682,7 @@ static u64 kvm_get_arch_capabilities(void) return data; } -static int kvm_get_msr_feature(struct kvm_msr_entry *msr) +static int kvm_get_feature_msr(struct kvm_msr_entry *msr) { switch (msr->index) { case MSR_IA32_ARCH_CAPABILITIES: @@ -1695,12 +1695,12 @@ static int kvm_get_msr_feature(struct kvm_msr_entry *msr) rdmsrl_safe(msr->index, &msr->data); break; default: - return static_call(kvm_x86_get_msr_feature)(msr->index, &msr->data); + return static_call(kvm_x86_get_feature_msr)(msr->index, &msr->data); } return 0; } -static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) +static int do_get_feature_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) { struct kvm_msr_entry msr; int r; @@ -1708,7 +1708,7 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) /* Unconditionally clear the output for simplicity */ msr.data = 0; msr.index = index; - r = kvm_get_msr_feature(&msr); + r = kvm_get_feature_msr(&msr); if (r == KVM_MSR_RET_UNSUPPORTED && kvm_msr_ignored_check(index, 0, false)) r = 0; @@ -4962,7 +4962,7 @@ long 
kvm_arch_dev_ioctl(struct file *filp, break; } case KVM_GET_MSRS: - r = msr_io(NULL, argp, do_get_msr_feature, 1); + r = msr_io(NULL, argp, do_get_feature_msr, 1); break; #ifdef CONFIG_KVM_HYPERV case KVM_GET_SUPPORTED_HV_CPUID: @@ -7367,7 +7367,7 @@ static void kvm_probe_feature_msr(u32 msr_index) .index = msr_index, }; - if (kvm_get_msr_feature(&msr)) + if (kvm_get_feature_msr(&msr)) return; msr_based_features[num_msr_based_features++] = msr_index;
Rename all APIs related to feature MSRs from get_msr_feature() to get_feature_msr(). The APIs get "feature MSRs", not "MSR features". And unlike kvm_{g,s}et_msr_common(), the "feature" adjective doesn't describe the helper itself. No functional change intended. Signed-off-by: Sean Christopherson <seanjc@google.com> --- arch/x86/include/asm/kvm-x86-ops.h | 2 +- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/svm/svm.c | 6 +++--- arch/x86/kvm/vmx/main.c | 2 +- arch/x86/kvm/vmx/vmx.c | 2 +- arch/x86/kvm/vmx/x86_ops.h | 2 +- arch/x86/kvm/x86.c | 12 ++++++------ 7 files changed, 14 insertions(+), 14 deletions(-)