
[3/4] selftests: kvm/x86: Add testing for masked events

Message ID 20220523214110.1282480-4-aaronlewis@google.com (mailing list archive)
State New, archived
Series kvm: x86/pmu: Introduce and test masked events

Commit Message

Aaron Lewis May 23, 2022, 9:41 p.m. UTC
Add testing for the PMU event filter's masked events.  These tests
exercise different ways of finding an event the guest is attempting to
program in an event list.  For any given eventsel there may be multiple
instances of it in the event list, so the tests look up a match in
several ways to force the matching algorithm to walk the relevant
eventsels and ensure that it a) finds a match and b) stays within its
bounds.

Signed-off-by: Aaron Lewis <aaronlewis@google.com>
---
 .../kvm/x86_64/pmu_event_filter_test.c        | 107 ++++++++++++++++++
 1 file changed, 107 insertions(+)

Patch

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index 4bff4c71ac45..4071043bbe26 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -18,8 +18,12 @@ 
 /*
  * In lieu of copying perf_event.h into tools...
  */
+#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
 #define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
 #define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
+#define AMD64_EVENTSEL_EVENT	\
+	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
+
 
 union cpuid10_eax {
 	struct {
@@ -445,6 +449,107 @@  static bool use_amd_pmu(void)
 		 is_zen3(entry->eax));
 }
 
+#define ENCODE_MASKED_EVENT(select, mask, match, invert) \
+		KVM_PMU_EVENT_ENCODE_MASKED_EVENT(select, mask, match, invert)
+
+static void expect_success(uint64_t count)
+{
+	if (count != NUM_BRANCHES)
+		pr_info("masked filter: Branch instructions retired = %lu (expected %u)\n",
+			count, NUM_BRANCHES);
+	TEST_ASSERT(count, "Allowed PMU event is not counting");
+}
+
+static void expect_failure(uint64_t count)
+{
+	if (count)
+		pr_info("masked filter: Branch instructions retired = %lu (expected 0)\n",
+			count);
+	TEST_ASSERT(!count, "Disallowed PMU Event is counting");
+}
+
+static void run_masked_filter_test(struct kvm_vm *vm, uint64_t masked_events[],
+				   const int nmasked_events, uint64_t event,
+				   uint32_t action, bool invert,
+				   void (*expected_func)(uint64_t))
+{
+	struct kvm_pmu_event_filter *f;
+	uint64_t old_event;
+	uint64_t count;
+	int i;
+
+	for (i = 0; i < nmasked_events; i++) {
+		if ((masked_events[i] & AMD64_EVENTSEL_EVENT) != EVENT(event, 0))
+			continue;
+
+		old_event = masked_events[i];
+
+		masked_events[i] =
+			ENCODE_MASKED_EVENT(event, ~0x00, 0x00, invert);
+
+		f = create_pmu_event_filter(masked_events, nmasked_events, action,
+					    KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
+
+		count = test_with_filter(vm, f);
+		free(f);
+
+		expected_func(count);
+
+		masked_events[i] = old_event;
+	}
+}
+
+static void run_masked_filter_tests(struct kvm_vm *vm, uint64_t masked_events[],
+				    const int nmasked_events, uint64_t event)
+{
+	run_masked_filter_test(vm, masked_events, nmasked_events, event,
+			       KVM_PMU_EVENT_ALLOW, /*invert=*/false,
+			       expect_success);
+	run_masked_filter_test(vm, masked_events, nmasked_events, event,
+			       KVM_PMU_EVENT_ALLOW, /*invert=*/true,
+			       expect_failure);
+	run_masked_filter_test(vm, masked_events, nmasked_events, event,
+			       KVM_PMU_EVENT_DENY, /*invert=*/false,
+			       expect_failure);
+	run_masked_filter_test(vm, masked_events, nmasked_events, event,
+			       KVM_PMU_EVENT_DENY, /*invert=*/true,
+			       expect_success);
+}
+
+static void test_masked_filters(struct kvm_vm *vm)
+{
+	uint64_t masked_events[11];
+	const int nmasked_events = ARRAY_SIZE(masked_events);
+	uint64_t prev_event, event, next_event;
+	int i;
+
+	if (use_intel_pmu()) {
+		/* Instructions retired */
+		prev_event = 0xc0;
+		event = INTEL_BR_RETIRED;
+		/* Branch misses retired */
+		next_event = 0xc5;
+	} else {
+		TEST_ASSERT(use_amd_pmu(), "Unknown platform");
+		/* Retired instructions */
+		prev_event = 0xc0;
+		event = AMD_ZEN_BR_RETIRED;
+		/* Retired branch instructions mispredicted */
+		next_event = 0xc3;
+	}
+
+	for (i = 0; i < nmasked_events; i++)
+		masked_events[i] =
+			ENCODE_MASKED_EVENT(event, ~0x00, i + 1, 0);
+
+	run_masked_filter_tests(vm, masked_events, nmasked_events, event);
+
+	masked_events[0] = ENCODE_MASKED_EVENT(prev_event, ~0x00, 0, 0);
+	masked_events[1] = ENCODE_MASKED_EVENT(next_event, ~0x00, 0, 0);
+
+	run_masked_filter_tests(vm, masked_events, nmasked_events, event);
+}
+
 int main(int argc, char *argv[])
 {
 	void (*guest_code)(void) = NULL;
@@ -489,6 +594,8 @@  int main(int argc, char *argv[])
 	test_not_member_deny_list(vm);
 	test_not_member_allow_list(vm);
 
+	test_masked_filters(vm);
+
 	kvm_vm_free(vm);
 
 	test_pmu_config_disable(guest_code);