@@ -53,6 +53,7 @@ char *buf;
 
 static struct pmu_event *gp_events;
 static unsigned int gp_events_size;
+static unsigned int fixed_events_size;
 
 static inline void loop(void)
 {
@@ -256,6 +257,8 @@ static void check_fixed_counters(void)
 	int i;
 
 	for (i = 0; i < pmu.nr_fixed_counters; i++) {
+		if (i >= fixed_events_size)
+			continue;
 		cnt.ctr = fixed_events[i].unit_sel;
 		measure_one(&cnt);
 		report(verify_event(cnt.count, &fixed_events[i]), "fixed-%d", i);
@@ -277,6 +280,8 @@ static void check_counters_many(void)
 		n++;
 	}
 	for (i = 0; i < pmu.nr_fixed_counters; i++) {
+		if (i >= fixed_events_size)
+			continue;
 		cnt[n].ctr = fixed_events[i].unit_sel;
 		cnt[n].config = EVNTSEL_OS | EVNTSEL_USR;
 		n++;
@@ -700,6 +705,7 @@ int main(int ac, char **av)
 		}
 		gp_events = (struct pmu_event *)intel_gp_events;
 		gp_events_size = sizeof(intel_gp_events)/sizeof(intel_gp_events[0]);
+		fixed_events_size = sizeof(fixed_events)/sizeof(fixed_events[0]);
 		report_prefix_push("Intel");
 		set_ref_cycle_expectations();
 	} else {
Arch PMU v5 adds the Fixed Counter Enumeration feature: through CPUID.0AH.ECX, a fixed counter can be enumerated whose index lies beyond the end of the fixed_events[] array. Skip any fixed counter whose index falls outside the fixed_events[] array so the test does not read past the end of the table.

Signed-off-by: Xiong Zhang <xiong.y.zhang@intel.com>
---
 x86/pmu.c | 6 ++++++
 1 file changed, 6 insertions(+)
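
For reference, below is a minimal standalone sketch of the guard this patch adds, assuming a three-entry fixed_events[] table and an Arch PMU v5 enumeration (via the CPUID.0AH.ECX bit mask) that reports a fourth fixed counter. The names mirror those in x86/pmu.c, but the struct layout, unit_sel values, and counter count are illustrative stand-ins, not copies of the test's definitions.

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* Illustrative stand-in for the fixed_events[] table in x86/pmu.c. */
    struct pmu_event {
            const char *name;
            unsigned int unit_sel;
    };

    static const struct pmu_event fixed_events[] = {
            { "fixed 0", 0x0300 },  /* hypothetical unit_sel values */
            { "fixed 1", 0x0301 },
            { "fixed 2", 0x0302 },
    };

    int main(void)
    {
            /*
             * Pretend CPUID.0AH.ECX (the Arch PMU v5 fixed counter bit
             * mask) enumerated a fourth fixed counter that the test's
             * expectation table does not cover.
             */
            unsigned int nr_fixed_counters = 4;
            unsigned int fixed_events_size = ARRAY_SIZE(fixed_events);
            unsigned int i;

            for (i = 0; i < nr_fixed_counters; i++) {
                    if (i >= fixed_events_size) {
                            /*
                             * Same bound check the patch adds: there is no
                             * expected count for this counter, so skip it.
                             */
                            printf("fixed-%u: skipped, no entry in fixed_events[]\n", i);
                            continue;
                    }
                    printf("fixed-%u: program unit_sel 0x%x (%s)\n",
                           i, fixed_events[i].unit_sel, fixed_events[i].name);
            }
            return 0;
    }

Without such a check, the loops in check_fixed_counters() and check_counters_many() would index past the end of fixed_events[] whenever the enumerated fixed counter count exceeds the number of entries in the table.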