[1/9] KVM: selftests: Add forced emulation check to fix #UD

Message ID 20231121115457.76269-2-cloudliang@tencent.com (mailing list archive)
State New, archived
Series: Test the consistency of AMD PMU counters and their features

Commit Message

Jinrong Liang Nov. 21, 2023, 11:54 a.m. UTC
Forced emulation uses a magic "prefix" to trigger a #UD, which KVM then
intercepts and emulates. If forced emulation isn't enabled, KVM ignores the
magic prefix and reflects the #UD back into the guest, i.e. the test fails
with an unexpected #UD.

Fix this by moving the RDPMC testcases into a helper, guest_test_rdpmc(),
that skips the forced-emulation variant when forced emulation isn't enabled.

Reported-by: Jinrong Liang <cloudliang@tencent.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Jinrong Liang <cloudliang@tencent.com>
---
 .../selftests/kvm/x86_64/pmu_counters_test.c  | 41 ++++++++++++-------
 1 file changed, 26 insertions(+), 15 deletions(-)
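[Editor's note: for readers unfamiliar with forced emulation, below is a
minimal sketch of the guard pattern this patch introduces: the prefixed
("FEP") variant of an instruction may only be executed when forced emulation
is actually enabled, otherwise the prefix itself raises a real #UD in the
guest. The KVM_FEP bytes and the helper shown here are assumptions based on
the selftests' existing rdpmc_safe()/rdpmc_safe_fep() helpers and the
is_forced_emulation_enabled flag; they are illustrative, not part of this
patch.]

/*
 * Illustrative sketch only.  KVM_FEP is assumed to mirror the selftests'
 * magic forced-emulation prefix: a UD2 opcode followed by 'k', 'v', 'm'.
 * KVM treats it as an "emulate the next instruction" prefix only when the
 * force_emulation_prefix module param is set; otherwise the UD2 is
 * reflected into the guest as a real #UD.
 */
#define KVM_FEP	"ud2; .byte 'k', 'v', 'm';"

static void guest_read_pmc(uint32_t rdpmc_idx, uint64_t expected_val)
{
	uint64_t val;

	/* Plain RDPMC is always safe to execute. */
	GUEST_ASSERT(!rdpmc_safe(rdpmc_idx, &val));
	GUEST_ASSERT_EQ(val, expected_val);

	/* Skip the FEP variant if the prefix would fault. */
	if (!is_forced_emulation_enabled)
		return;

	GUEST_ASSERT(!rdpmc_safe_fep(rdpmc_idx, &val));
	GUEST_ASSERT_EQ(val, expected_val);
}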

Patch

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
index 248ebe8c0577..7d8094a27209 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
@@ -325,6 +325,26 @@  __GUEST_ASSERT(expect_gp ? vector == GP_VECTOR : !vector,			\
 		       "Expected " #insn "(0x%x) to yield 0x%lx, got 0x%lx",	\
 		       msr, expected_val, val);
 
+static void guest_test_rdpmc(uint32_t rdpmc_idx, bool expect_success,
+			     uint64_t expected_val)
+{
+	uint8_t vector;
+	uint64_t val;
+
+	vector = rdpmc_safe(rdpmc_idx, &val);
+	GUEST_ASSERT_PMC_MSR_ACCESS(RDPMC, rdpmc_idx, !expect_success, vector);
+	if (expect_success)
+		GUEST_ASSERT_PMC_VALUE(RDPMC, rdpmc_idx, val, expected_val);
+
+	if (!is_forced_emulation_enabled)
+		return;
+
+	vector = rdpmc_safe_fep(rdpmc_idx, &val);
+	GUEST_ASSERT_PMC_MSR_ACCESS(RDPMC, rdpmc_idx, !expect_success, vector);
+	if (expect_success)
+		GUEST_ASSERT_PMC_VALUE(RDPMC, rdpmc_idx, val, expected_val);
+}
+
 static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters,
 				 uint8_t nr_counters, uint32_t or_mask)
 {
@@ -367,20 +387,15 @@  static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters
 		if (!expect_gp)
 			GUEST_ASSERT_PMC_VALUE(RDMSR, msr, val, expected_val);
 
+		/*
+		 * Redo the read tests with RDPMC, which has different indexing
+		 * semantics and additional capabilities.
+		 */
 		rdpmc_idx = i;
 		if (base_msr == MSR_CORE_PERF_FIXED_CTR0)
 			rdpmc_idx |= INTEL_RDPMC_FIXED;
 
-		/* Redo the read tests with RDPMC, and with forced emulation. */
-		vector = rdpmc_safe(rdpmc_idx, &val);
-		GUEST_ASSERT_PMC_MSR_ACCESS(RDPMC, rdpmc_idx, !expect_success, vector);
-		if (expect_success)
-			GUEST_ASSERT_PMC_VALUE(RDPMC, rdpmc_idx, val, expected_val);
-
-		vector = rdpmc_safe_fep(rdpmc_idx, &val);
-		GUEST_ASSERT_PMC_MSR_ACCESS(RDPMC, rdpmc_idx, !expect_success, vector);
-		if (expect_success)
-			GUEST_ASSERT_PMC_VALUE(RDPMC, rdpmc_idx, val, expected_val);
+		guest_test_rdpmc(rdpmc_idx, expect_success, expected_val);
 
 		/*
 		 * KVM doesn't support non-architectural PMUs, i.e. it should
@@ -390,11 +405,7 @@  static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters
 		GUEST_ASSERT(!expect_success || !pmu_has_fast_mode);
 		rdpmc_idx |= INTEL_RDPMC_FAST;
 
-		vector = rdpmc_safe(rdpmc_idx, &val);
-		GUEST_ASSERT_PMC_MSR_ACCESS(RDPMC, rdpmc_idx, true, vector);
-
-		vector = rdpmc_safe_fep(rdpmc_idx, &val);
-		GUEST_ASSERT_PMC_MSR_ACCESS(RDPMC, rdpmc_idx, true, vector);
+		guest_test_rdpmc(rdpmc_idx, false, -1ull);
 
 		vector = wrmsr_safe(msr, 0);
 		GUEST_ASSERT_PMC_MSR_ACCESS(WRMSR, msr, expect_gp, vector);