@@ -23,6 +23,7 @@ LIBKVM += lib/guest_modes.c
LIBKVM += lib/io.c
LIBKVM += lib/kvm_util.c
LIBKVM += lib/memstress.c
+LIBKVM += lib/pmu.c
LIBKVM += lib/guest_sprintf.c
LIBKVM += lib/rbtree.c
LIBKVM += lib/sparsebit.c
new file mode 100644
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * tools/testing/selftests/kvm/include/pmu.h
+ *
+ * Copyright (C) 2023, Tencent, Inc.
+ */
+#ifndef SELFTEST_KVM_PMU_H
+#define SELFTEST_KVM_PMU_H
+
+#include <stdint.h>
+
+#include <linux/bits.h>
+
+#define X86_PMC_IDX_MAX 64
+#define INTEL_PMC_MAX_GENERIC 32
+#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
+
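+/*
+ * Fields enumerated by CPUID.0xA: the PMU version, number of GP counters, and
+ * arch event bit-vector length live in EAX; EDX[4:0] holds the number of
+ * fixed counters.
+ */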
+#define GP_COUNTER_NR_OFS_BIT 8
+#define EVENT_LENGTH_OFS_BIT 24
+
+#define PMU_VERSION_MASK GENMASK_ULL(7, 0)
+#define EVENT_LENGTH_MASK GENMASK_ULL(31, EVENT_LENGTH_OFS_BIT)
+#define GP_COUNTER_NR_MASK GENMASK_ULL(15, GP_COUNTER_NR_OFS_BIT)
+#define FIXED_COUNTER_NR_MASK GENMASK_ULL(4, 0)
+
+#define ARCH_PERFMON_EVENTSEL_EVENT GENMASK_ULL(7, 0)
+#define ARCH_PERFMON_EVENTSEL_UMASK GENMASK_ULL(15, 8)
+#define ARCH_PERFMON_EVENTSEL_USR BIT_ULL(16)
+#define ARCH_PERFMON_EVENTSEL_OS BIT_ULL(17)
+#define ARCH_PERFMON_EVENTSEL_EDGE BIT_ULL(18)
+#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL BIT_ULL(19)
+#define ARCH_PERFMON_EVENTSEL_INT BIT_ULL(20)
+#define ARCH_PERFMON_EVENTSEL_ANY BIT_ULL(21)
+#define ARCH_PERFMON_EVENTSEL_ENABLE BIT_ULL(22)
+#define ARCH_PERFMON_EVENTSEL_INV BIT_ULL(23)
+#define ARCH_PERFMON_EVENTSEL_CMASK GENMASK_ULL(31, 24)
+
+#define PMC_MAX_FIXED 16
+#define PMC_IDX_FIXED 32
+
+/* RDPMC offset for Fixed PMCs */
+#define PMC_FIXED_RDPMC_BASE BIT_ULL(30)
+#define PMC_FIXED_RDPMC_METRICS BIT_ULL(29)
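+/*
+ * E.g. fixed counter @idx can be read in the guest via
+ * rdpmc(PMC_FIXED_RDPMC_BASE | idx) (illustrative; rdpmc() is the helper
+ * from processor.h).
+ */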
+
+#define FIXED_BITS_MASK 0xFULL
+#define FIXED_BITS_STRIDE 4
+#define FIXED_0_KERNEL BIT_ULL(0)
+#define FIXED_0_USER BIT_ULL(1)
+#define FIXED_0_ANYTHREAD BIT_ULL(2)
+#define FIXED_0_ENABLE_PMI BIT_ULL(3)
+
+#define fixed_bits_by_idx(_idx, _bits) \
+ ((_bits) << ((_idx) * FIXED_BITS_STRIDE))
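+/*
+ * E.g. fixed_bits_by_idx(1, FIXED_0_KERNEL | FIXED_0_USER) builds the
+ * IA32_FIXED_CTR_CTRL bits that enable fixed counter 1 for both CPL0 and
+ * CPL>0 (a usage sketch, not used verbatim below).
+ */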
+
+#define AMD64_NR_COUNTERS 4
+#define AMD64_NR_COUNTERS_CORE 6
+
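+/* Select bits of MSR_IA32_PERF_CAPABILITIES (per the Intel SDM) */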
+#define PMU_CAP_FW_WRITES BIT_ULL(13)
+#define PMU_CAP_LBR_FMT 0x3f
+
+enum intel_pmu_architectural_events {
+ /*
+ * The order of the architectural events matters as support for each
+ * event is enumerated via CPUID using the index of the event.
+ */
+ INTEL_ARCH_CPU_CYCLES,
+ INTEL_ARCH_INSTRUCTIONS_RETIRED,
+ INTEL_ARCH_REFERENCE_CYCLES,
+ INTEL_ARCH_LLC_REFERENCES,
+ INTEL_ARCH_LLC_MISSES,
+ INTEL_ARCH_BRANCHES_RETIRED,
+ INTEL_ARCH_BRANCHES_MISPREDICTED,
+
+ NR_REAL_INTEL_ARCH_EVENTS,
+
+ /*
+ * Pseudo-architectural event used to implement IA32_FIXED_CTR2, a.k.a.
+ * TSC reference cycles. The architectural reference cycles event may
+ * or may not actually use the TSC as the reference, e.g. might use the
+ * core crystal clock or the bus clock (yeah, "architectural").
+ */
+ PSEUDO_ARCH_REFERENCE_CYCLES = NR_REAL_INTEL_ARCH_EVENTS,
+ NR_INTEL_ARCH_EVENTS,
+};
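+
+/*
+ * Illustrative check (hypothetical snippet): event @idx is unavailable if
+ * its bit is set in the CPUID.0xA.EBX bit vector, e.g.
+ *
+ *	bool unavailable = cpuid_0xa_ebx & BIT(idx);
+ */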
+
+enum amd_pmu_zen_events {
+ AMD_ZEN_CORE_CYCLES,
+ AMD_ZEN_INSTRUCTIONS,
+ AMD_ZEN_BRANCHES,
+ AMD_ZEN_BRANCH_MISSES,
+ NR_AMD_ARCH_EVENTS,
+};
+
+extern const uint64_t intel_pmu_arch_events[];
+extern const uint64_t amd_pmu_arch_events[];
+extern const int intel_pmu_fixed_pmc_events[];
+
+#endif /* SELFTEST_KVM_PMU_H */
new file mode 100644
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Kernel-based Virtual Machine -- Selftests Performance Monitoring Unit support
+ *
+ * Copyright (C) 2023, Tencent, Inc.
+ */
+
+#include <stdint.h>
+
+#include "pmu.h"
+
+/* Definitions for Architectural Performance Events */
+#define ARCH_EVENT(select, umask) (((select) & 0xff) | ((umask) & 0xff) << 8)
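+/* E.g. ARCH_EVENT(0x3c, 0x0) = select 0x3c, umask 0x0 ("unhalted core cycles"). */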
+
+const uint64_t intel_pmu_arch_events[] = {
+ [INTEL_ARCH_CPU_CYCLES] = ARCH_EVENT(0x3c, 0x0),
+ [INTEL_ARCH_INSTRUCTIONS_RETIRED] = ARCH_EVENT(0xc0, 0x0),
+ [INTEL_ARCH_REFERENCE_CYCLES] = ARCH_EVENT(0x3c, 0x1),
+ [INTEL_ARCH_LLC_REFERENCES] = ARCH_EVENT(0x2e, 0x4f),
+ [INTEL_ARCH_LLC_MISSES] = ARCH_EVENT(0x2e, 0x41),
+ [INTEL_ARCH_BRANCHES_RETIRED] = ARCH_EVENT(0xc4, 0x0),
+ [INTEL_ARCH_BRANCHES_MISPREDICTED] = ARCH_EVENT(0xc5, 0x0),
+ [PSEUDO_ARCH_REFERENCE_CYCLES] = ARCH_EVENT(0xa4, 0x1),
+};
+
+/* Mapping between fixed PMC index and the intel_pmu_arch_events[] array */
+const int intel_pmu_fixed_pmc_events[] = {
+ [0] = INTEL_ARCH_INSTRUCTIONS_RETIRED,
+ [1] = INTEL_ARCH_CPU_CYCLES,
+ [2] = PSEUDO_ARCH_REFERENCE_CYCLES,
+};
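+
+/*
+ * E.g. per the mapping above, IA32_FIXED_CTR0 counts the same event as a GP
+ * counter programmed with
+ * intel_pmu_arch_events[INTEL_ARCH_INSTRUCTIONS_RETIRED].
+ */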
+
+const uint64_t amd_pmu_arch_events[] = {
+ [AMD_ZEN_CORE_CYCLES] = ARCH_EVENT(0x76, 0x00),
+ [AMD_ZEN_INSTRUCTIONS] = ARCH_EVENT(0xc0, 0x00),
+ [AMD_ZEN_BRANCHES] = ARCH_EVENT(0xc2, 0x00),
+ [AMD_ZEN_BRANCH_MISSES] = ARCH_EVENT(0xc3, 0x00),
+};
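+
+/*
+ * Illustrative guest-side usage (a sketch, not wired up by this patch):
+ * program GP counter 0 to count architectural "instructions retired" and
+ * enable it for CPL0, e.g.
+ *
+ *	wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_OS |
+ *	      ARCH_PERFMON_EVENTSEL_ENABLE |
+ *	      intel_pmu_arch_events[INTEL_ARCH_INSTRUCTIONS_RETIRED]);
+ */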
@@ -11,31 +11,18 @@
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
-#include "test_util.h"
+
#include "kvm_util.h"
+#include "pmu.h"
#include "processor.h"
-
-/*
- * In lieu of copying perf_event.h into tools...
- */
-#define ARCH_PERFMON_EVENTSEL_OS (1ULL << 17)
-#define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)
-
-/* End of stuff taken from perf_event.h. */
-
-/* Oddly, this isn't in perf_event.h. */
-#define ARCH_PERFMON_BRANCHES_RETIRED 5
+#include "test_util.h"
#define NUM_BRANCHES 42
-#define INTEL_PMC_IDX_FIXED 32
-
-/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
-#define MAX_FILTER_EVENTS 300
#define MAX_TEST_EVENTS 10
#define PMU_EVENT_FILTER_INVALID_ACTION (KVM_PMU_EVENT_DENY + 1)
#define PMU_EVENT_FILTER_INVALID_FLAGS (KVM_PMU_EVENT_FLAGS_VALID_MASK << 1)
-#define PMU_EVENT_FILTER_INVALID_NEVENTS (MAX_FILTER_EVENTS + 1)
+#define PMU_EVENT_FILTER_INVALID_NEVENTS (KVM_PMU_EVENT_FILTER_MAX_EVENTS + 1)
/*
* This is how the event selector and unit mask are stored in an AMD
@@ -63,7 +50,6 @@
#define AMD_ZEN_BR_RETIRED EVENT(0xc2, 0)
-
/*
* "Retired instructions", from Processor Programming Reference
* (PPR) for AMD Family 17h Model 01h, Revision B1 Processors,
@@ -84,7 +70,7 @@ struct __kvm_pmu_event_filter {
__u32 fixed_counter_bitmap;
__u32 flags;
__u32 pad[4];
- __u64 events[MAX_FILTER_EVENTS];
+ __u64 events[KVM_PMU_EVENT_FILTER_MAX_EVENTS];
};
/*
@@ -729,14 +715,14 @@ static void add_dummy_events(uint64_t *events, int nevents)
static void test_masked_events(struct kvm_vcpu *vcpu)
{
- int nevents = MAX_FILTER_EVENTS - MAX_TEST_EVENTS;
- uint64_t events[MAX_FILTER_EVENTS];
+ int nevents = KVM_PMU_EVENT_FILTER_MAX_EVENTS - MAX_TEST_EVENTS;
+ uint64_t events[KVM_PMU_EVENT_FILTER_MAX_EVENTS];
/* Run the test cases against a sparse PMU event filter. */
run_masked_events_tests(vcpu, events, 0);
/* Run the test cases against a dense PMU event filter. */
- add_dummy_events(events, MAX_FILTER_EVENTS);
+ add_dummy_events(events, KVM_PMU_EVENT_FILTER_MAX_EVENTS);
run_masked_events_tests(vcpu, events, nevents);
}
@@ -818,7 +804,7 @@ static void intel_run_fixed_counter_guest_code(uint8_t fixed_ctr_idx)
/* Only OS_EN bit is enabled for fixed counter[idx]. */
wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, BIT_ULL(4 * fixed_ctr_idx));
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL,
- BIT_ULL(INTEL_PMC_IDX_FIXED + fixed_ctr_idx));
+ BIT_ULL(PMC_IDX_FIXED + fixed_ctr_idx));
__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);