
[v4,4/9] KVM: selftests: Test Intel PMU architectural events on gp counters

Message ID: 20230911114347.85882-5-cloudliang@tencent.com
State: New, archived
Series: KVM: selftests: Test the consistency of the PMU's CPUID and its features

Commit Message

Jinrong Liang Sept. 11, 2023, 11:43 a.m. UTC
From: Jinrong Liang <cloudliang@tencent.com>

Add test cases to check that architectural events are not available after
they are marked as unavailable via CPUID. This covers KVM's vPMU event
filtering logic based on Intel CPUID, and complements the pmu_event_filter
test.

According to the Intel SDM, the number of architectural events is reported
through CPUID.0AH:EAX[31:24], and architectural event x is supported if
EBX[x] = 0 && EAX[31:24] > x.
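
For illustration (a sketch, not part of the patch), that rule translates to
roughly the following guest-side check, using GCC's <cpuid.h> here instead
of the selftests helpers:

	#include <stdbool.h>
	#include <stdint.h>
	#include <cpuid.h>

	/*
	 * Sketch: is architectural event @idx advertised by CPUID.0AH?
	 * EAX[31:24] is the length of the EBX bit vector; EBX[x] = 1
	 * means event x is unavailable.
	 */
	static bool arch_event_is_supported(uint8_t idx)
	{
		uint32_t eax, ebx, ecx, edx;

		__cpuid(0xa, eax, ebx, ecx, edx);

		return !(ebx & (1u << idx)) && ((eax >> 24) & 0xff) > idx;
	}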

Co-developed-by: Like Xu <likexu@tencent.com>
Signed-off-by: Like Xu <likexu@tencent.com>
Signed-off-by: Jinrong Liang <cloudliang@tencent.com>
---
 tools/testing/selftests/kvm/Makefile          |   1 +
 .../selftests/kvm/x86_64/pmu_counters_test.c  | 182 ++++++++++++++++++
 2 files changed, 183 insertions(+)
 create mode 100644 tools/testing/selftests/kvm/x86_64/pmu_counters_test.c

Comments

Sean Christopherson Oct. 19, 2023, 11:39 p.m. UTC | #1
On Mon, Sep 11, 2023, Jinrong Liang wrote:
> +static void test_intel_arch_events(void)
> +{
> +	uint8_t idx;
> +
> +	for (idx = 0; idx < NR_INTEL_ARCH_EVENTS; idx++) {
> +		/*
> +		 * Only the arch events whose counts are stable enough to
> +		 * assert on are currently tested:
> +		 *
> +		 * - Core cycle event (idx = 0)
> +		 * - Instruction retired event (idx = 1)
> +		 * - Reference cycles event (idx = 2)
> +		 * - Branch instruction retired event (idx = 5)
> +		 */
> +		if (idx > INTEL_ARCH_REFERENCE_CYCLES &&
> +		    idx != INTEL_ARCH_BRANCHES_RETIRED)
> +			continue;
> +
> +		check_arch_event_is_unavl(idx);

Rather than completely skip the event, just don't check the counter.  That way
the test still verifies that it can program the event; it's only the result
that isn't stable enough to assert on.
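
A sketch of one way to implement that suggestion (hypothetical, not code
from this series) is to program every event but make only the assertion
conditional:

	/*
	 * Hypothetical helper: only these events produce counts that are
	 * deterministic enough to assert on; other events would still be
	 * programmed, but their final counts ignored.
	 */
	static bool arch_event_count_is_stable(uint8_t idx)
	{
		return idx <= INTEL_ARCH_REFERENCE_CYCLES ||
		       idx == INTEL_ARCH_BRANCHES_RETIRED;
	}

The guest would then call GUEST_ASSERT_EQ(expect, !!_rdpmc(i)) only when
arch_event_count_is_stable(idx) returns true.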

Patch

diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 172c4223b286..7768e09de96c 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -81,6 +81,7 @@  TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
 TEST_GEN_PROGS_x86_64 += x86_64/monitor_mwait_test
 TEST_GEN_PROGS_x86_64 += x86_64/nested_exceptions_test
 TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
+TEST_GEN_PROGS_x86_64 += x86_64/pmu_counters_test
 TEST_GEN_PROGS_x86_64 += x86_64/pmu_event_filter_test
 TEST_GEN_PROGS_x86_64 += x86_64/set_boot_cpu_id
 TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
new file mode 100644
index 000000000000..f47853f3ab84
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
@@ -0,0 +1,182 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test the consistency of the PMU's CPUID and its features
+ *
+ * Copyright (C) 2023, Tencent, Inc.
+ *
+ * Check that the VM's PMU behaviour is consistent with the
+ * VM CPUID definition.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <x86intrin.h>
+
+#include "pmu.h"
+#include "processor.h"
+
+/* Number of iterations of the guest measurement payload loop */
+#define NUM_BRANCHES		10
+
+static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
+						  void *guest_code)
+{
+	struct kvm_vm *vm;
+
+	vm = vm_create_with_one_vcpu(vcpu, guest_code);
+	vm_init_descriptor_tables(vm);
+	vcpu_init_descriptor_tables(*vcpu);
+
+	return vm;
+}
+
+static void run_vcpu(struct kvm_vcpu *vcpu)
+{
+	struct ucall uc;
+
+	do {
+		vcpu_run(vcpu);
+		switch (get_ucall(vcpu, &uc)) {
+		case UCALL_SYNC:
+			break;
+		case UCALL_ABORT:
+			REPORT_GUEST_ASSERT(uc);
+			break;
+		case UCALL_DONE:
+			break;
+		default:
+			TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
+		}
+	} while (uc.cmd != UCALL_DONE);
+}
+
+static void guest_measure_pmu_v1(uint8_t idx, uint32_t counter_msr,
+				 uint32_t nr_gp_counters, bool expect)
+{
+	unsigned int i;
+
+	for (i = 0; i < nr_gp_counters; i++) {
+		wrmsr(counter_msr + i, 0);
+		wrmsr(MSR_P6_EVNTSEL0 + i, ARCH_PERFMON_EVENTSEL_OS |
+		      ARCH_PERFMON_EVENTSEL_ENABLE |
+		      intel_pmu_arch_events[idx]);
+		__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+		GUEST_ASSERT_EQ(expect, !!_rdpmc(i));
+
+		/* Disable the event and verify the counter stops counting. */
+		wrmsr(MSR_P6_EVNTSEL0 + i,
+		      ARCH_PERFMON_EVENTSEL_OS | intel_pmu_arch_events[idx]);
+		wrmsr(counter_msr + i, 0);
+		__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+		GUEST_ASSERT(!_rdpmc(i));
+	}
+
+	GUEST_DONE();
+}
+
+static void guest_measure_loop(uint8_t idx)
+{
+	uint32_t nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
+	uint32_t pmu_version = this_cpu_property(X86_PROPERTY_PMU_VERSION);
+	uint32_t counter_msr;
+	unsigned int i;
+	bool expect;
+
+	if (rdmsr(MSR_IA32_PERF_CAPABILITIES) & PMU_CAP_FW_WRITES)
+		counter_msr = MSR_IA32_PMC0;
+	else
+		counter_msr = MSR_IA32_PERFCTR0;
+
+	expect = this_pmu_has_arch_event(KVM_X86_PMU_FEATURE(UNUSED, idx));
+
+	if (pmu_version < 2) {
+		guest_measure_pmu_v1(idx, counter_msr, nr_gp_counters, expect);
+		return;
+	}
+
+	for (i = 0; i < nr_gp_counters; i++) {
+		wrmsr(counter_msr + i, 0);
+		wrmsr(MSR_P6_EVNTSEL0 + i, ARCH_PERFMON_EVENTSEL_OS |
+		      ARCH_PERFMON_EVENTSEL_ENABLE |
+		      intel_pmu_arch_events[idx]);
+
+		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, BIT_ULL(i));
+		__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+
+		GUEST_ASSERT_EQ(expect, !!_rdpmc(i));
+	}
+
+	GUEST_DONE();
+}
+
+static void test_arch_events_cpuid(uint8_t i, uint8_t j, uint8_t idx)
+{
+	uint8_t arch_events_unavailable_mask = BIT_ULL(j);
+	uint8_t arch_events_bitmap_size = BIT_ULL(i);
+	struct kvm_vcpu *vcpu;
+	struct kvm_vm *vm;
+
+	vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_measure_loop);
+
+	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH,
+				arch_events_bitmap_size);
+	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_EVENTS_MASK,
+				arch_events_unavailable_mask);
+
+	vcpu_args_set(vcpu, 1, idx);
+
+	run_vcpu(vcpu);
+
+	kvm_vm_free(vm);
+}
+
+static void check_arch_event_is_unavl(uint8_t idx)
+{
+	uint8_t i, j;
+
+	/*
+	 * Iterating over all combinations of the two CPUID fields would
+	 * create a huge number of VMs and risks exhausting the process's
+	 * fd limit, so instead test all valid single-bit values per field.
+	 */
+	for (i = 0; i < NR_INTEL_ARCH_EVENTS; i++) {
+		for (j = 0; j < NR_INTEL_ARCH_EVENTS; j++)
+			test_arch_events_cpuid(i, j, idx);
+	}
+}
+
+static void test_intel_arch_events(void)
+{
+	uint8_t idx;
+
+	for (idx = 0; idx < NR_INTEL_ARCH_EVENTS; idx++) {
+		/*
+		 * Only the arch events whose counts are stable enough to
+		 * assert on are currently tested:
+		 *
+		 * - Core cycle event (idx = 0)
+		 * - Instruction retired event (idx = 1)
+		 * - Reference cycles event (idx = 2)
+		 * - Branch instruction retired event (idx = 5)
+		 */
+		if (idx > INTEL_ARCH_REFERENCE_CYCLES &&
+		    idx != INTEL_ARCH_BRANCHES_RETIRED)
+			continue;
+
+		check_arch_event_is_unavl(idx);
+	}
+}
+
+int main(int argc, char *argv[])
+{
+	TEST_REQUIRE(get_kvm_param_bool("enable_pmu"));
+
+	TEST_REQUIRE(host_cpu_is_intel);
+	TEST_REQUIRE(kvm_cpu_has_p(X86_PROPERTY_PMU_VERSION));
+	TEST_REQUIRE(kvm_cpu_property(X86_PROPERTY_PMU_VERSION) > 0);
+	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_PDCM));
+
+	test_intel_arch_events();
+
+	return 0;
+}