
[v4,6/9] KVM: selftests: Test consistency of CPUID with num of gp counters

Message ID 20230911114347.85882-7-cloudliang@tencent.com (mailing list archive)
State New, archived
Series KVM: selftests: Test the consistency of the PMU's CPUID and its features

Commit Message

Jinrong Liang Sept. 11, 2023, 11:43 a.m. UTC
From: Jinrong Liang <cloudliang@tencent.com>

Add a test to check whether non-existent counters can be accessed in the
guest after the number of Intel generic performance counters has been
determined via CPUID. When the number of counters is less than 3, KVM does
not emulate #GP if a counter isn't present, due to its compatibility
handling of MSR_P6_PERFCTRx. Nor will KVM emulate more counters than it
can support.

Co-developed-by: Like Xu <likexu@tencent.com>
Signed-off-by: Like Xu <likexu@tencent.com>
Signed-off-by: Jinrong Liang <cloudliang@tencent.com>
---
 .../selftests/kvm/x86_64/pmu_counters_test.c  | 85 +++++++++++++++++++
 1 file changed, 85 insertions(+)

Comments

Sean Christopherson Oct. 20, 2023, 6:08 p.m. UTC | #1
On Mon, Sep 11, 2023, Jinrong Liang wrote:
> From: Jinrong Liang <cloudliang@tencent.com>
> 
> Add a test to check whether non-existent counters can be accessed in the
> guest after the number of Intel generic performance counters has been
> determined via CPUID. When the number of counters is less than 3, KVM does
> not emulate #GP if a counter isn't present, due to its compatibility
> handling of MSR_P6_PERFCTRx. Nor will KVM emulate more counters than it
> can support.
> 
> Co-developed-by: Like Xu <likexu@tencent.com>
> Signed-off-by: Like Xu <likexu@tencent.com>
> Signed-off-by: Jinrong Liang <cloudliang@tencent.com>
> ---
>  .../selftests/kvm/x86_64/pmu_counters_test.c  | 85 +++++++++++++++++++
>  1 file changed, 85 insertions(+)
> 
> diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
> index fe9f38a3557e..e636323e202c 100644
> --- a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
> @@ -17,6 +17,11 @@
>  /* Guest payload for any performance counter counting */
>  #define NUM_BRANCHES		10
>  
> +static const uint64_t perf_caps[] = {
> +	0,
> +	PMU_CAP_FW_WRITES,
> +};

Put this on the stack in the one testcase that uses it.  Placing the array super
far away from its use makes it unnecessarily difficult to see that the testcase
is simply running with and without full-width writes.
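
E.g. a minimal sketch (untested), with the array folded into the testcase:

static void test_intel_counters_num(void)
{
	uint8_t nr_gp_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
	const uint64_t perf_caps[] = { 0, PMU_CAP_FW_WRITES };
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(perf_caps); i++)
		test_oob_gp_counter(nr_gp_counters, perf_caps[i]);
}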

>  static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
>  						  void *guest_code)
>  {
> @@ -189,6 +194,85 @@ static void test_intel_arch_events(void)
>  	}
>  }
>  
> +static void __guest_wrmsr_rdmsr(uint32_t counter_msr, uint8_t nr_msrs,
> +				bool expect_gp)

Rather than pass in "expect_gp", compute it in here.  It's easy enough to explicitly
check for MSR_P6_PERFCTR[0|1].
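
E.g. something along these lines in the helper (untested), where "msr" is the
MSR that is actually accessed:

	const uint32_t msr = counter_msr + nr_msrs;

	/* KVM drops writes to MSR_P6_PERFCTR[0|1] instead of injecting #GP. */
	const bool expect_gp = msr != MSR_P6_PERFCTR0 && msr != MSR_P6_PERFCTR1;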

> +{
> +	uint64_t msr_val;
> +	uint8_t vector;
> +
> +	vector = wrmsr_safe(counter_msr + nr_msrs, 0xffff);

Doing all this work to test _one_ MSR at a time is silly.  And I see no reason
to do only negative testing.  Sure, positive testing might be redundant with other
tests (I truly don't know), but _not_ hardcoding one-off tests often ends up
requiring less code, and almost always results in more self-documenting code.
E.g. it took me far too much staring to understand why the "no #GP" case expects
to read back '0'.

> +	__GUEST_ASSERT(expect_gp ? vector == GP_VECTOR : !vector,
> +		       "Expected GP_VECTOR");

Print the actual vector!  And the MSR!  One of my pet peeves with KVM's tests is
not providing information on failure.  Having to hack a test or do interactive
debug just to figure out which MSR failed is *super* frustrating.

And I think it's worth providing a macro to handle the assertion+message, that
way it'll be easier to add more sub-tests, e.g. that the MSR can be written back
to '0'.

> +
> +	vector = rdmsr_safe(counter_msr + nr_msrs, &msr_val);
> +	__GUEST_ASSERT(expect_gp ? vector == GP_VECTOR : !vector,
> +		       "Expected GP_VECTOR");
> +
> +	if (!expect_gp)
> +		GUEST_ASSERT_EQ(msr_val, 0);
> +
> +	GUEST_DONE();
> +}
> +
> +static void guest_rd_wr_gp_counter(void)
> +{
> +	uint8_t nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
> +	uint64_t perf_capabilities = rdmsr(MSR_IA32_PERF_CAPABILITIES);
> +	uint32_t counter_msr;
> +	bool expect_gp = true;
> +
> +	if (perf_capabilities & PMU_CAP_FW_WRITES) {
> +		counter_msr = MSR_IA32_PMC0;
> +	} else {
> +		counter_msr = MSR_IA32_PERFCTR0;
> +
> +		/* KVM drops writes to MSR_P6_PERFCTR[0|1]. */
> +		if (nr_gp_counters == 0)
> +			expect_gp = false;
> +	}
> +
> +	__guest_wrmsr_rdmsr(counter_msr, nr_gp_counters, expect_gp);
> +}
> +
> +/* Access the first out-of-range counter register to trigger #GP */
> +static void test_oob_gp_counter(uint8_t eax_gp_num, uint64_t perf_cap)
> +{
> +	struct kvm_vcpu *vcpu;
> +	struct kvm_vm *vm;
> +
> +	vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_rd_wr_gp_counter);
> +
> +	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_NR_GP_COUNTERS,
> +				eax_gp_num);
> +	vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, perf_cap);
> +
> +	run_vcpu(vcpu);
> +
> +	kvm_vm_free(vm);
> +}
> +
> +static void test_intel_counters_num(void)
> +{
> +	uint8_t nr_gp_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
> +	unsigned int i;
> +
> +	TEST_REQUIRE(nr_gp_counters > 2);

This is beyond silly.  Just iterate over all possible counter values.  Again,
hardcoding values is almost never the best way to do things.
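
E.g. something like this (untested), with "j" being the number of GP counters
enumerated to the guest via CPUID:

	for (i = 0; i < ARRAY_SIZE(perf_caps); i++) {
		for (j = 0; j <= nr_gp_counters; j++)
			test_oob_gp_counter(j, perf_caps[i]);
	}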

> +
> +	for (i = 0; i < ARRAY_SIZE(perf_caps); i++) {
> +		/*
> +		 * For compatibility reasons, KVM does not emulate #GP
> +		 * when MSR_P6_PERFCTR[0|1] is not present, but it doesn't
> +		 * affect checking the presence of MSR_IA32_PMCx with #GP.
> +		 */
> +		test_oob_gp_counter(0, perf_caps[i]);
> +		test_oob_gp_counter(2, perf_caps[i]);
> +		test_oob_gp_counter(nr_gp_counters, perf_caps[i]);
> +
> +		/* KVM doesn't emulate more counters than it can support. */
> +		test_oob_gp_counter(nr_gp_counters + 1, perf_caps[i]);

Hmm, so I think we should avoid blindly testing undefined MSRs.  I don't disagree
that expecting #GP is reasonable, but I don't think this is the right place to
test for architecturally undefined MSRs.  E.g. if Intel defines some completely
unrelated MSR at 0xc3 or 0x4c9 then this test will fail.

Rather than assume anything about "nr_gp_counters + 1", I think we should test up
to what Intel has architecturally defined, e.g. define the max number of counters
and then pass that in as the "possible" counters:

#define GUEST_ASSERT_PMC_MSR_ACCESS(insn, msr, expect_gp, vector)		\
__GUEST_ASSERT(expect_gp ? vector == GP_VECTOR : !vector,			\
	       "Expected %s on " #insn "(0x%x), got vector %u",			\
	       expect_gp ? "#GP" : "no fault", msr, vector)			\

static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters,
				 uint8_t nr_counters, uint32_t or_mask)
{
	uint8_t i;

	for (i = 0; i < nr_possible_counters; i++) {
		const uint32_t msr = base_msr + i;

		/*
		 * Fixed counters are supported if the counter is less than the
		 * number of enumerated contiguous counters *or* the counter is
		 * explicitly enumerated in the supported counters mask.
		 */
		const bool expect_success = i < nr_counters || (or_mask & BIT(i));

		/*
		 * KVM drops writes to MSR_P6_PERFCTR[0|1] if the counters are
		 * unsupported, i.e. doesn't #GP and reads back '0'.
		 */
		const uint64_t expected_val = expect_success ? 0xffff : 0;
		const bool expect_gp = !expect_success && msr != MSR_P6_PERFCTR0 &&
				       msr != MSR_P6_PERFCTR1;
		uint8_t vector;
		uint64_t val;

		vector = wrmsr_safe(msr, 0xffff);
		GUEST_ASSERT_PMC_MSR_ACCESS(WRMSR, msr, expect_gp, vector);

		vector = rdmsr_safe(msr, &val);
		GUEST_ASSERT_PMC_MSR_ACCESS(RDMSR, msr, expect_gp, vector);

		/* On #GP, the result of RDMSR is undefined. */
		if (!expect_gp)
			__GUEST_ASSERT(val == expected_val,
				       "Expected RDMSR(0x%x) to yield 0x%lx, got 0x%lx",
				       msr, expected_val, val);

		vector = wrmsr_safe(msr, 0);
		GUEST_ASSERT_PMC_MSR_ACCESS(WRMSR, msr, expect_gp, vector);
	}
}
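
The guest side of the GP counters testcase then becomes something like this
(untested; MAX_NR_GP_COUNTERS would be a new #define for the architectural
maximum, and the or_mask is unused for GP counters):

static void guest_test_gp_counters(void)
{
	uint8_t nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
	uint32_t base_msr;

	if (rdmsr(MSR_IA32_PERF_CAPABILITIES) & PMU_CAP_FW_WRITES)
		base_msr = MSR_IA32_PMC0;
	else
		base_msr = MSR_IA32_PERFCTR0;

	guest_rd_wr_counters(base_msr, MAX_NR_GP_COUNTERS, nr_gp_counters, 0);

	GUEST_DONE();
}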

Patch

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
index fe9f38a3557e..e636323e202c 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
@@ -17,6 +17,11 @@ 
 /* Guest payload for any performance counter counting */
 #define NUM_BRANCHES		10
 
+static const uint64_t perf_caps[] = {
+	0,
+	PMU_CAP_FW_WRITES,
+};
+
 static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
 						  void *guest_code)
 {
@@ -189,6 +194,85 @@ static void test_intel_arch_events(void)
 	}
 }
 
+static void __guest_wrmsr_rdmsr(uint32_t counter_msr, uint8_t nr_msrs,
+				bool expect_gp)
+{
+	uint64_t msr_val;
+	uint8_t vector;
+
+	vector = wrmsr_safe(counter_msr + nr_msrs, 0xffff);
+	__GUEST_ASSERT(expect_gp ? vector == GP_VECTOR : !vector,
+		       "Expected GP_VECTOR");
+
+	vector = rdmsr_safe(counter_msr + nr_msrs, &msr_val);
+	__GUEST_ASSERT(expect_gp ? vector == GP_VECTOR : !vector,
+		       "Expected GP_VECTOR");
+
+	if (!expect_gp)
+		GUEST_ASSERT_EQ(msr_val, 0);
+
+	GUEST_DONE();
+}
+
+static void guest_rd_wr_gp_counter(void)
+{
+	uint8_t nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
+	uint64_t perf_capabilities = rdmsr(MSR_IA32_PERF_CAPABILITIES);
+	uint32_t counter_msr;
+	bool expect_gp = true;
+
+	if (perf_capabilities & PMU_CAP_FW_WRITES) {
+		counter_msr = MSR_IA32_PMC0;
+	} else {
+		counter_msr = MSR_IA32_PERFCTR0;
+
+		/* KVM drops writes to MSR_P6_PERFCTR[0|1]. */
+		if (nr_gp_counters == 0)
+			expect_gp = false;
+	}
+
+	__guest_wrmsr_rdmsr(counter_msr, nr_gp_counters, expect_gp);
+}
+
+/* Access the first out-of-range counter register to trigger #GP */
+static void test_oob_gp_counter(uint8_t eax_gp_num, uint64_t perf_cap)
+{
+	struct kvm_vcpu *vcpu;
+	struct kvm_vm *vm;
+
+	vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_rd_wr_gp_counter);
+
+	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_NR_GP_COUNTERS,
+				eax_gp_num);
+	vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, perf_cap);
+
+	run_vcpu(vcpu);
+
+	kvm_vm_free(vm);
+}
+
+static void test_intel_counters_num(void)
+{
+	uint8_t nr_gp_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
+	unsigned int i;
+
+	TEST_REQUIRE(nr_gp_counters > 2);
+
+	for (i = 0; i < ARRAY_SIZE(perf_caps); i++) {
+		/*
+		 * For compatibility reasons, KVM does not emulate #GP
+		 * when MSR_P6_PERFCTR[0|1] is not present, but it doesn't
+		 * affect checking the presence of MSR_IA32_PMCx with #GP.
+		 */
+		test_oob_gp_counter(0, perf_caps[i]);
+		test_oob_gp_counter(2, perf_caps[i]);
+		test_oob_gp_counter(nr_gp_counters, perf_caps[i]);
+
+		/* KVM doesn't emulate more counters than it can support. */
+		test_oob_gp_counter(nr_gp_counters + 1, perf_caps[i]);
+	}
+}
+
 int main(int argc, char *argv[])
 {
 	TEST_REQUIRE(get_kvm_param_bool("enable_pmu"));
@@ -199,6 +283,7 @@ int main(int argc, char *argv[])
 	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_PDCM));
 
 	test_intel_arch_events();
+	test_intel_counters_num();
 
 	return 0;
 }