Message ID | 20240423221521.2923759-1-seanjc@google.com (mailing list archive) |
---|---|
Series | KVM: x86: Collect host state snapshots into a struct |
On Wednesday, April 24, 2024 6:15 AM, Sean Christopherson wrote:
> Add a global "kvm_host" structure to hold various host values, e.g. for EFER,
> XCR0, raw MAXPHYADDR etc., instead of having a bunch of one-off variables
> that inevitably need to be exported, or in the case of shadow_phys_bits, are
> buried in a random location and are awkward to use, leading to duplicate
> code.

Looks good. How about applying similar improvements to the module
parameters as well? I've changed the "enable_pmu" parameter as an example below:

diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 77352a4abd87..a221ba7b546f 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -1013,7 +1013,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
 		union cpuid10_eax eax;
 		union cpuid10_edx edx;
 
-		if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
+		if (!kvm_caps.enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
 			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
 			break;
 		}
@@ -1306,7 +1306,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
 		union cpuid_0x80000022_ebx ebx;
 
 		entry->ecx = entry->edx = 0;
-		if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
+		if (!kvm_caps.enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
 			entry->eax = entry->ebx;
 			break;
 		}
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 4d52b0b539ba..7e359db64dbd 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -190,9 +190,9 @@ static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
 	 * for hybrid PMUs until KVM gains a way to let userspace opt-in.
 	 */
 	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
-		enable_pmu = false;
+		kvm_caps.enable_pmu = false;
 
-	if (enable_pmu) {
+	if (kvm_caps.enable_pmu) {
 		perf_get_x86_pmu_capability(&kvm_pmu_cap);
 
 		/*
@@ -203,12 +203,12 @@ static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
 		 */
 		if (!kvm_pmu_cap.num_counters_gp ||
 		    WARN_ON_ONCE(kvm_pmu_cap.num_counters_gp < min_nr_gp_ctrs))
-			enable_pmu = false;
+			kvm_caps.enable_pmu = false;
 		else if (is_intel && !kvm_pmu_cap.version)
-			enable_pmu = false;
+			kvm_caps.enable_pmu = false;
 	}
 
-	if (!enable_pmu) {
+	if (!kvm_caps.enable_pmu) {
 		memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
 		return;
 	}
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 7ea0e7f13da4..4ed8c73f88e4 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7869,7 +7869,7 @@ static __init u64 vmx_get_perf_capabilities(void)
 	u64 perf_cap = PMU_CAP_FW_WRITES;
 	u64 host_perf_cap = 0;
 
-	if (!enable_pmu)
+	if (!kvm_caps.enable_pmu)
 		return 0;
 
 	if (boot_cpu_has(X86_FEATURE_PDCM))
@@ -7938,7 +7938,7 @@ static __init void vmx_set_cpu_caps(void)
 		kvm_cpu_cap_check_and_set(X86_FEATURE_DTES64);
 	}
 
-	if (!enable_pmu)
+	if (!kvm_caps.enable_pmu)
 		kvm_cpu_cap_clear(X86_FEATURE_PDCM);
 
 	kvm_caps.supported_perf_cap = vmx_get_perf_capabilities();
@@ -8683,7 +8683,7 @@ static __init int hardware_setup(void)
 	if (pt_mode != PT_MODE_SYSTEM && pt_mode != PT_MODE_HOST_GUEST)
 		return -EINVAL;
 
-	if (!enable_ept || !enable_pmu || !cpu_has_vmx_intel_pt())
+	if (!enable_ept || !kvm_caps.enable_pmu || !cpu_has_vmx_intel_pt())
 		pt_mode = PT_MODE_SYSTEM;
 	if (pt_mode == PT_MODE_HOST_GUEST)
 		vmx_init_ops.handle_intel_pt_intr = vmx_handle_intel_pt_intr;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cdcda1bbf5a3..36d471a7af87 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -94,6 +94,7 @@ struct kvm_caps kvm_caps __read_mostly = {
 	.supported_mce_cap = MCG_CTL_P | MCG_SER_P,
+	.enable_pmu = true,
 };
 EXPORT_SYMBOL_GPL(kvm_caps);
 
@@ -192,9 +193,7 @@ int __read_mostly pi_inject_timer = -1;
 module_param(pi_inject_timer, bint, 0644);
 
 /* Enable/disable PMU virtualization */
-bool __read_mostly enable_pmu = true;
-EXPORT_SYMBOL_GPL(enable_pmu);
-module_param(enable_pmu, bool, 0444);
+module_param_named(enable_pmu, kvm_caps.enable_pmu, bool, 0444);
 
 bool __read_mostly eager_page_split = true;
 module_param(eager_page_split, bool, 0644);
@@ -4815,7 +4814,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		break;
 	}
 	case KVM_CAP_PMU_CAPABILITY:
-		r = enable_pmu ? KVM_CAP_PMU_VALID_MASK : 0;
+		r = kvm_caps.enable_pmu ? KVM_CAP_PMU_VALID_MASK : 0;
 		break;
 	case KVM_CAP_DISABLE_QUIRKS2:
 		r = KVM_X86_VALID_QUIRKS;
@@ -6652,7 +6651,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
 		break;
 	case KVM_CAP_PMU_CAPABILITY:
 		r = -EINVAL;
-		if (!enable_pmu || (cap->args[0] & ~KVM_CAP_PMU_VALID_MASK))
+		if (!kvm_caps.enable_pmu || (cap->args[0] & ~KVM_CAP_PMU_VALID_MASK))
 			break;
 
 		mutex_lock(&kvm->lock);
@@ -7438,7 +7437,7 @@ static void kvm_init_msr_lists(void)
 	for (i = 0; i < ARRAY_SIZE(msrs_to_save_base); i++)
 		kvm_probe_msr_to_save(msrs_to_save_base[i]);
 
-	if (enable_pmu) {
+	if (kvm_caps.enable_pmu) {
 		for (i = 0; i < ARRAY_SIZE(msrs_to_save_pmu); i++)
 			kvm_probe_msr_to_save(msrs_to_save_pmu[i]);
 	}
@@ -12555,7 +12554,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	kvm->arch.default_tsc_khz = max_tsc_khz ? : tsc_khz;
 	kvm->arch.guest_can_read_msr_platform_info = true;
-	kvm->arch.enable_pmu = enable_pmu;
+	kvm->arch.enable_pmu = kvm_caps.enable_pmu;
 
 #if IS_ENABLED(CONFIG_HYPERV)
 	spin_lock_init(&kvm->arch.hv_root_tdp_lock);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 102754dc85bc..c4d99338aaa1 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -29,6 +29,9 @@ struct kvm_caps {
 	u64 supported_xcr0;
 	u64 supported_xss;
 	u64 supported_perf_cap;
+
+	/* KVM module parameters */
+	bool enable_pmu;
 };
 
 struct kvm_host_values {
@@ -340,8 +343,6 @@ fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
 extern struct kvm_caps kvm_caps;
 extern struct kvm_host_values kvm_host;
 
-extern bool enable_pmu;
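For readers unfamiliar with the mechanism Wei's diff relies on: module_param_named()
decouples the user-visible parameter name from the variable backing it, so the storage
can be a struct member rather than a standalone global. A minimal, self-contained
sketch of that pattern follows; the module and struct names ("demo_params") are
hypothetical and are not part of the series:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Hypothetical container for module params, analogous to kvm_caps above. */
static struct demo_params {
	bool enable_pmu;
} demo_params __read_mostly = {
	.enable_pmu = true,
};

/*
 * Exposed to userspace as /sys/module/<module>/parameters/enable_pmu with
 * mode 0444 (read-only), exactly as if it were backed by a plain bool global.
 */
module_param_named(enable_pmu, demo_params.enable_pmu, bool, 0444);

static int __init demo_init(void)
{
	pr_info("demo: enable_pmu=%d\n", demo_params.enable_pmu);
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Illustration of module_param_named() backed by a struct member");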
On Thu, Apr 25, 2024, Wei W Wang wrote:
> On Wednesday, April 24, 2024 6:15 AM, Sean Christopherson wrote:
> > Add a global "kvm_host" structure to hold various host values, e.g. for EFER,
> > XCR0, raw MAXPHYADDR etc., instead of having a bunch of one-off variables
> > that inevitably need to be exported, or in the case of shadow_phys_bits, are
> > buried in a random location and are awkward to use, leading to duplicate
> > code.
>
> Looks good. How about applying similar improvements to the module
> parameters as well? I've changed the "enable_pmu" parameter as an example below:

Hmm, I don't hate the idea, but I don't think it would work as well in practice.

For kvm_host, all of the fields it contains were being namespaced with
"host_<asset>", and the globals that became kvm_caps all had some variant of
kvm_ or kvm_has_ as a prefix.

For module params and other knobs, the thing being controlled is usually unique
to KVM, and often fairly self-descriptive, so we haven't had to namespace them
much. And we have params across kvm.ko, kvm-amd.ko, and kvm-intel.ko, which
sometimes have weird splits in responsibilities, e.g. enable_apicv is defined by
common x86, but the module params themselves are defined by SVM and VMX, and for
SVM it's an alias of avic.

> diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
> index 77352a4abd87..a221ba7b546f 100644
> --- a/arch/x86/kvm/cpuid.c
> +++ b/arch/x86/kvm/cpuid.c
> @@ -1013,7 +1013,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
>  		union cpuid10_eax eax;
>  		union cpuid10_edx edx;
>  
> -		if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
> +		if (!kvm_caps.enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {

If we did try to collect module params in a struct, it should be a unique
struct, because they aren't pure capabilities or host values.

>  			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
>  			break;
>  		}
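To make the "unique struct" idea concrete, one possible shape, purely illustrative
(the struct name "kvm_params" and the field set are hypothetical and were not
proposed in the thread beyond the sentence above), would gather the common-x86
knobs in their own container instead of kvm_caps:

/* arch/x86/kvm/x86.h (hypothetical) */
struct kvm_params {
	bool enable_pmu;
	bool eager_page_split;
};

extern struct kvm_params kvm_params;

/* arch/x86/kvm/x86.c (hypothetical) */
struct kvm_params kvm_params __read_mostly = {
	.enable_pmu = true,
	.eager_page_split = true,
};
EXPORT_SYMBOL_GPL(kvm_params);

/* Same sysfs names and permissions as the existing globals. */
module_param_named(enable_pmu, kvm_params.enable_pmu, bool, 0444);
module_param_named(eager_page_split, kvm_params.eager_page_split, bool, 0644);

The vendor-specific knobs Sean mentions (e.g. enable_apicv, whose module params
live in kvm-intel.ko and kvm-amd.ko, the latter as an alias of avic) would not
fold into such a kvm.ko-owned struct cleanly, which is the practical objection
raised above.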
On Tue, 23 Apr 2024 15:15:17 -0700, Sean Christopherson wrote:
> Add a global "kvm_host" structure to hold various host values, e.g. for
> EFER, XCR0, raw MAXPHYADDR etc., instead of having a bunch of one-off
> variables that inevitably need to be exported, or in the case of
> shadow_phys_bits, are buried in a random location and are awkward to use,
> leading to duplicate code.
>
> Sean Christopherson (4):
>   KVM: x86: Add a struct to consolidate host values, e.g. EFER, XCR0,
>     etc...
>   KVM: SVM: Use KVM's snapshot of the host's XCR0 for SEV-ES host state
>   KVM: x86/mmu: Snapshot shadow_phys_bits when kvm.ko is loaded
>   KVM: x86: Move shadow_phys_bits into "kvm_host", as "maxphyaddr"
>
> [...]

Applied to kvm-x86 misc, thanks!

[1/4] KVM: x86: Add a struct to consolidate host values, e.g. EFER, XCR0, etc...
      https://github.com/kvm-x86/linux/commit/7974c0643ee3
[2/4] KVM: SVM: Use KVM's snapshot of the host's XCR0 for SEV-ES host state
      https://github.com/kvm-x86/linux/commit/52c47f5897b6
[3/4] KVM: x86/mmu: Snapshot shadow_phys_bits when kvm.ko is loaded
      https://github.com/kvm-x86/linux/commit/c043eaaa6be0
[4/4] KVM: x86: Move shadow_phys_bits into "kvm_host", as "maxphyaddr"
      https://github.com/kvm-x86/linux/commit/82897db91215

--
https://github.com/kvm-x86/linux/tree/next