| Message ID | 20230814115108.45741-5-cloudliang@tencent.com (mailing list archive) |
|---|---|
| State | New, archived |
| Headers | show |
| Series | KVM: selftests: Test the consistency of the PMU's CPUID and its features (expand) |
On Mon, Aug 14, 2023, Jinrong Liang wrote: > From: Jinrong Liang <cloudliang@tencent.com> > > Update test to cover Intel PMU architectural events on fixed counters. > Per Intel SDM, PMU users can also count architecture performance events > on fixed counters (specifically, FIXED_CTR0 for the retired instructions > and FIXED_CTR1 for cpu core cycles event). Therefore, if guest's CPUID > indicates that an architecture event is not available, the corresponding > fixed counter will also not count that event. > > Co-developed-by: Like Xu <likexu@tencent.com> > Signed-off-by: Like Xu <likexu@tencent.com> > Signed-off-by: Jinrong Liang <cloudliang@tencent.com> > --- > .../kvm/x86_64/pmu_basic_functionality_test.c | 21 +++++++++++++++++++ > 1 file changed, 21 insertions(+) > > diff --git a/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c b/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c > index c04eb0bdf69f..daa45aa285bb 100644 > --- a/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c > +++ b/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c > @@ -47,6 +47,7 @@ static uint64_t run_vcpu(struct kvm_vcpu *vcpu, uint64_t *ucall_arg) > > static void guest_measure_loop(uint64_t event_code) > { > + uint32_t nr_fixed_counter = this_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); There's zero reason to cache this. 
> uint32_t nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS); > uint32_t pmu_version = this_cpu_property(X86_PROPERTY_PMU_VERSION); > uint32_t counter_msr; > @@ -73,6 +74,26 @@ static void guest_measure_loop(uint64_t event_code) > } > } > > + if (pmu_version < 2 || nr_fixed_counter < 1) Because you can simply do: if (pmu_version < 2 || this_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS) < 1) goto done; > + goto done; > + > + if (event_code == intel_arch_events[INTEL_ARCH_INSTRUCTIONS_RETIRED]) > + i = 0; > + else if (event_code == intel_arch_events[INTEL_ARCH_CPU_CYCLES]) > + i = 1; > + else > + goto done; > + > + wrmsr(MSR_CORE_PERF_FIXED_CTR0 + i, 0); > + wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, BIT_ULL(4 * i)); > + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, BIT_ULL(INTEL_PMC_IDX_FIXED + i)); > + > + __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES})); > + > + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0); > + GUEST_SYNC(_rdpmc(RDPMC_FIXED_BASE | i)); > + > +done: > GUEST_DONE(); > } > > -- > 2.39.3 >
diff --git a/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c b/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c index c04eb0bdf69f..daa45aa285bb 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c @@ -47,6 +47,7 @@ static uint64_t run_vcpu(struct kvm_vcpu *vcpu, uint64_t *ucall_arg) static void guest_measure_loop(uint64_t event_code) { + uint32_t nr_fixed_counter = this_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); uint32_t nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS); uint32_t pmu_version = this_cpu_property(X86_PROPERTY_PMU_VERSION); uint32_t counter_msr; @@ -73,6 +74,26 @@ static void guest_measure_loop(uint64_t event_code) } } + if (pmu_version < 2 || nr_fixed_counter < 1) + goto done; + + if (event_code == intel_arch_events[INTEL_ARCH_INSTRUCTIONS_RETIRED]) + i = 0; + else if (event_code == intel_arch_events[INTEL_ARCH_CPU_CYCLES]) + i = 1; + else + goto done; + + wrmsr(MSR_CORE_PERF_FIXED_CTR0 + i, 0); + wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, BIT_ULL(4 * i)); + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, BIT_ULL(INTEL_PMC_IDX_FIXED + i)); + + __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES})); + + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0); + GUEST_SYNC(_rdpmc(RDPMC_FIXED_BASE | i)); + +done: GUEST_DONE(); }