Message ID | 20230814115108.45741-6-cloudliang@tencent.com
---|---
State | New, archived
Series | KVM: selftests: Test the consistency of the PMU's CPUID and its features
On Mon, Aug 14, 2023, Jinrong Liang wrote:
> From: Jinrong Liang <cloudliang@tencent.com>
>
> Add a test to check if non-existent counters can be accessed in guest after
> determining the number of Intel generic performance counters by CPUID.
> When the number of counters is less than 3, KVM does not emulate #GP if
> a counter isn't present due to compatibility MSR_P6_PERFCTRx handling.
> Nor will KVM emulate more counters than it can support.
>
> Co-developed-by: Like Xu <likexu@tencent.com>
> Signed-off-by: Like Xu <likexu@tencent.com>
> Signed-off-by: Jinrong Liang <cloudliang@tencent.com>
> ---
>  .../kvm/x86_64/pmu_basic_functionality_test.c | 78 +++++++++++++++++++
>  1 file changed, 78 insertions(+)
>
> diff --git a/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c b/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c
> index daa45aa285bb..b86033e51d5c 100644
> --- a/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c
> @@ -16,6 +16,11 @@
>  /* Guest payload for any performance counter counting */
>  #define NUM_BRANCHES 10
>
> +static const uint64_t perf_caps[] = {
> +        0,
> +        PMU_CAP_FW_WRITES,
> +};
> +
>  static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
>                                                    void *guest_code)
>  {
> @@ -164,6 +169,78 @@ static void intel_test_arch_events(void)
>          }
>  }
>
> +static void guest_wr_and_rd_msrs(uint32_t base, uint8_t begin, uint8_t offset)
> +{
> +        uint8_t wr_vector, rd_vector;
> +        uint64_t msr_val;
> +        unsigned int i;
> +
> +        for (i = begin; i < begin + offset; i++) {
> +                wr_vector = wrmsr_safe(base + i, 0xffff);
> +                rd_vector = rdmsr_safe(base + i, &msr_val);
> +                if (wr_vector == GP_VECTOR || rd_vector == GP_VECTOR)
> +                        GUEST_SYNC(GP_VECTOR);

Rather than pass around the "expected" vector, and shuffle #GP vs. the msr_val
up (which can get false negatives if msr_val == 13), just read
MSR_IA32_PERF_CAPABILITIES from within the guest and GUEST_ASSERT accordingly.
On Thu, Aug 17, 2023, Sean Christopherson wrote:
> On Mon, Aug 14, 2023, Jinrong Liang wrote:
> > From: Jinrong Liang <cloudliang@tencent.com>
> >
> > Add a test to check if non-existent counters can be accessed in guest after
> > determining the number of Intel generic performance counters by CPUID.
> > When the number of counters is less than 3, KVM does not emulate #GP if
> > a counter isn't present due to compatibility MSR_P6_PERFCTRx handling.
> > Nor will KVM emulate more counters than it can support.
> >
> > Co-developed-by: Like Xu <likexu@tencent.com>
> > Signed-off-by: Like Xu <likexu@tencent.com>
> > Signed-off-by: Jinrong Liang <cloudliang@tencent.com>
> > ---
> >  .../kvm/x86_64/pmu_basic_functionality_test.c | 78 +++++++++++++++++++
> >  1 file changed, 78 insertions(+)
> >
> > diff --git a/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c b/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c
> > index daa45aa285bb..b86033e51d5c 100644
> > --- a/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c
> > +++ b/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c
> > @@ -16,6 +16,11 @@
> >  /* Guest payload for any performance counter counting */
> >  #define NUM_BRANCHES 10
> >
> > +static const uint64_t perf_caps[] = {
> > +        0,
> > +        PMU_CAP_FW_WRITES,
> > +};
> > +
> >  static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
> >                                                    void *guest_code)
> >  {
> > @@ -164,6 +169,78 @@ static void intel_test_arch_events(void)
> >          }
> >  }
> >
> > +static void guest_wr_and_rd_msrs(uint32_t base, uint8_t begin, uint8_t offset)
> > +{
> > +        uint8_t wr_vector, rd_vector;
> > +        uint64_t msr_val;
> > +        unsigned int i;
> > +
> > +        for (i = begin; i < begin + offset; i++) {
> > +                wr_vector = wrmsr_safe(base + i, 0xffff);
> > +                rd_vector = rdmsr_safe(base + i, &msr_val);

Unless I'm missing something, there is zero reason to pass "base" and "begin"
separately; just do the math in the host.  A "base" that isn't actually the base
when viewed without the full context is super confusing.

> > +                if (wr_vector == GP_VECTOR || rd_vector == GP_VECTOR)
> > +                        GUEST_SYNC(GP_VECTOR);
>
> Rather than pass around the "expected" vector, and shuffle #GP vs. the msr_val
> up (which can get false negatives if msr_val == 13), just read
> MSR_IA32_PERF_CAPABILITIES from within the guest and GUEST_ASSERT accordingly.

Ah, you did that so that the fixed counter test can reuse the guest code.  Just
use separate trampolines in the guest, e.g.

static void __guest_wrmsr_rdmsr(uint32_t base, uint8_t nr_msrs, bool expect_gp)
{
        uint64_t msr_val;
        uint8_t vector;
        uint32_t i;

        for (i = base; i < base + nr_msrs; i++) {
                vector = wrmsr_safe(i, 0xffff);
                GUEST_ASSERT(expect_gp ? vector == GP_VECTOR : !vector, "...");

                vector = rdmsr_safe(i, &msr_val);
                GUEST_ASSERT(expect_gp ? vector == GP_VECTOR : !vector, "...");

                if (!expect_gp)
                        GUEST_ASSERT_EQ(msr_val, 0);
        }

        GUEST_DONE();
}

static void guest_rd_wr_fixed_counter(uint32_t base, uint8_t nr_msrs)
{
        __guest_wrmsr_rdmsr(base, nr_msrs, true);
}

static void guest_rd_wr_gp_counter(uint32_t base, uint8_t nr_msrs)
{
        uint64_t perf_capabilities = rdmsr(MSR_IA32_PERF_CAPABILITIES);

        __guest_wrmsr_rdmsr(base, nr_msrs, !!perf_capabilities);
}
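For readers following along, here is one way the host side might drive the suggested
trampolines. This is a sketch only: the reworked test_oob_gp_counter() shape (dropping
the offset/expected parameters and open-coding the ucall handling) is an assumption,
not the code that was ultimately merged; guest_rd_wr_gp_counter() comes from the sketch
above, and the vcpu_* helpers come from the posted patch.

static void test_oob_gp_counter(uint8_t eax_gp_num, uint64_t perf_cap)
{
        /* Do the MSR math in the host, per the feedback above. */
        uint32_t base_msr = (perf_cap & PMU_CAP_FW_WRITES) ?
                            MSR_IA32_PMC0 : MSR_IA32_PERFCTR0;
        struct kvm_vcpu *vcpu;
        struct kvm_vm *vm;
        struct ucall uc;

        vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_rd_wr_gp_counter);

        vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_NR_GP_COUNTERS, eax_gp_num);
        vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, perf_cap);

        /* Hand the guest the first counter MSR that should not exist. */
        vcpu_args_set(vcpu, 2, base_msr + eax_gp_num, 1);

        vcpu_run(vcpu);

        switch (get_ucall(vcpu, &uc)) {
        case UCALL_ABORT:
                REPORT_GUEST_ASSERT(uc);
                break;
        case UCALL_DONE:
                break;
        default:
                TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
        }

        kvm_vm_free(vm);
}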
diff --git a/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c b/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c
index daa45aa285bb..b86033e51d5c 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c
@@ -16,6 +16,11 @@
 /* Guest payload for any performance counter counting */
 #define NUM_BRANCHES 10
 
+static const uint64_t perf_caps[] = {
+        0,
+        PMU_CAP_FW_WRITES,
+};
+
 static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
                                                   void *guest_code)
 {
@@ -164,6 +169,78 @@ static void intel_test_arch_events(void)
         }
 }
 
+static void guest_wr_and_rd_msrs(uint32_t base, uint8_t begin, uint8_t offset)
+{
+        uint8_t wr_vector, rd_vector;
+        uint64_t msr_val;
+        unsigned int i;
+
+        for (i = begin; i < begin + offset; i++) {
+                wr_vector = wrmsr_safe(base + i, 0xffff);
+                rd_vector = rdmsr_safe(base + i, &msr_val);
+                if (wr_vector == GP_VECTOR || rd_vector == GP_VECTOR)
+                        GUEST_SYNC(GP_VECTOR);
+                else
+                        GUEST_SYNC(msr_val);
+        }
+
+        GUEST_DONE();
+}
+
+/* Access the first out-of-range counter register to trigger #GP */
+static void test_oob_gp_counter(uint8_t eax_gp_num, uint8_t offset,
+                                uint64_t perf_cap, uint64_t expected)
+{
+        uint32_t ctr_msr = MSR_IA32_PERFCTR0;
+        struct kvm_vcpu *vcpu;
+        uint64_t msr_val = 0;
+        struct kvm_vm *vm;
+
+        vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_wr_and_rd_msrs);
+
+        vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_NR_GP_COUNTERS,
+                                eax_gp_num);
+
+        if (perf_cap & PMU_CAP_FW_WRITES)
+                ctr_msr = MSR_IA32_PMC0;
+
+        vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, perf_cap);
+        vcpu_args_set(vcpu, 3, ctr_msr, eax_gp_num, offset);
+        while (run_vcpu(vcpu, &msr_val) != UCALL_DONE)
+                TEST_ASSERT_EQ(expected, msr_val);
+
+        kvm_vm_free(vm);
+}
+
+static void intel_test_counters_num(void)
+{
+        uint8_t nr_gp_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
+        unsigned int i;
+
+        TEST_REQUIRE(nr_gp_counters > 2);
+
+        for (i = 0; i < ARRAY_SIZE(perf_caps); i++) {
+                /*
+                 * For compatibility reasons, KVM does not emulate #GP
+                 * when MSR_P6_PERFCTR[0|1] is not present, but it doesn't
+                 * affect checking the presence of MSR_IA32_PMCx with #GP.
+                 */
+                if (perf_caps[i] & PMU_CAP_FW_WRITES)
+                        test_oob_gp_counter(0, 1, perf_caps[i], GP_VECTOR);
+
+                test_oob_gp_counter(2, 1, perf_caps[i], GP_VECTOR);
+                test_oob_gp_counter(nr_gp_counters, 1, perf_caps[i], GP_VECTOR);
+
+                /* KVM doesn't emulate more counters than it can support. */
+                test_oob_gp_counter(nr_gp_counters + 1, 1, perf_caps[i],
+                                    GP_VECTOR);
+
+                /* Test that KVM drops writes to MSR_P6_PERFCTR[0|1]. */
+                if (!perf_caps[i])
+                        test_oob_gp_counter(0, 2, perf_caps[i], 0);
+        }
+}
+
 int main(int argc, char *argv[])
 {
         TEST_REQUIRE(get_kvm_param_bool("enable_pmu"));
@@ -174,6 +251,7 @@ int main(int argc, char *argv[])
         TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_PDCM));
 
         intel_test_arch_events();
+        intel_test_counters_num();
 
         return 0;
 }
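Note that the diff relies on a run_vcpu() helper defined earlier in
pmu_basic_functionality_test.c, outside the hunks shown here. Based on how it is used
(returning the ucall command and passing the GUEST_SYNC() value back through the second
argument), a plausible shape is the following sketch; treat it as an illustration, not
the actual definition.

static uint64_t run_vcpu(struct kvm_vcpu *vcpu, uint64_t *ucall_arg)
{
        struct ucall uc;

        vcpu_run(vcpu);
        switch (get_ucall(vcpu, &uc)) {
        case UCALL_SYNC:
                /* Value the guest passed to GUEST_SYNC(). */
                *ucall_arg = uc.args[1];
                break;
        case UCALL_ABORT:
                REPORT_GUEST_ASSERT(uc);
                break;
        default:
                break;
        }

        return uc.cmd;
}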