@@ -59,12 +59,20 @@
#define PRE_OVERFLOW 0xFFFFFFF0
#define PRE_OVERFLOW2 0xFFFFFFDC
+#define PMU_PPI 23
+
struct pmu {
unsigned int version;
unsigned int nb_implemented_counters;
uint32_t pmcr_ro;
};
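+
+/* overflow interrupt statistics gathered by the PMU PPI handler */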
+struct pmu_stats {
+ unsigned long bitmap;
+ uint32_t interrupts[32];
+ bool unexpected;
+};
+
static struct pmu pmu;
#if defined(__arm__)
@@ -146,6 +154,7 @@ static void test_sw_incr(void) {}
static void test_chained_counters(void) {}
static void test_chained_sw_incr(void) {}
static void test_chain_promotion(void) {}
+static void test_overflow_interrupt(void) {}
#elif defined(__aarch64__)
#define ID_AA64DFR0_PERFMON_SHIFT 8
@@ -276,6 +285,43 @@ asm volatile(
: "x9", "x10", "cc");
}
+static struct pmu_stats pmu_stats;
+
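+/*
+ * PMU PPI handler: for each counter whose overflow flag is set, bump its
+ * per-counter interrupt count and record it in pmu_stats.bitmap. Any other
+ * interrupt source is flagged as unexpected.
+ */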
+static void irq_handler(struct pt_regs *regs)
+{
+ uint32_t irqstat, irqnr;
+
+ irqstat = gic_read_iar();
+ irqnr = gic_iar_irqnr(irqstat);
+
+ if (irqnr == PMU_PPI) {
+ unsigned long overflows = read_sysreg(pmovsclr_el0);
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ if (test_and_clear_bit(i, &overflows)) {
+ pmu_stats.interrupts[i]++;
+ pmu_stats.bitmap |= 1UL << i;
+ }
+ }
+ write_sysreg(ALL_SET, pmovsclr_el0);
+ } else {
+ pmu_stats.unexpected = true;
+ }
+ gic_write_eoir(irqstat);
+}
+
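+/* clear the software-side overflow interrupt statistics */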
+static void pmu_reset_stats(void)
+{
+ int i;
+
+ for (i = 0; i < 32; i++)
+ pmu_stats.interrupts[i] = 0;
+
+ pmu_stats.bitmap = 0;
+ pmu_stats.unexpected = false;
+}
+
static void pmu_reset(void)
{
/* reset all counters, counting disabled at PMCR level */
@@ -286,6 +332,7 @@ static void pmu_reset(void)
write_sysreg(ALL_SET, pmovsclr_el0);
/* disable overflow interrupts on all counters */
write_sysreg(ALL_SET, pmintenclr_el1);
+ pmu_reset_stats();
isb();
}
@@ -729,6 +776,94 @@ static void test_chain_promotion(void)
read_sysreg(pmovsclr_el0));
}
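+/*
+ * Check that exactly one overflow interrupt was received on each counter
+ * set in @bitmap, on no other counter, and that no unexpected (non-PMU)
+ * interrupt was taken.
+ */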
+static bool expect_interrupts(uint32_t bitmap)
+{
+ int i;
+
+ if (pmu_stats.bitmap ^ bitmap || pmu_stats.unexpected)
+ return false;
+
+ for (i = 0; i < 32; i++) {
+ if (test_and_clear_bit(i, &pmu_stats.bitmap))
+ if (pmu_stats.interrupts[i] != 1)
+ return false;
+ }
+ return true;
+}
+
+static void test_overflow_interrupt(void)
+{
+ uint32_t events[] = {MEM_ACCESS, SW_INCR};
+ void *addr = malloc(PAGE_SIZE);
+ int i;
+
+ if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
+ return;
+
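+ /* route the PMU PPI to our handler and unmask it at the GIC */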
+ gic_enable_defaults();
+ install_irq_handler(EL1H_IRQ, irq_handler);
+ local_irq_enable();
+ gic_enable_irq(PMU_PPI);
+
+ pmu_reset();
+
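+ /* #0 counts MEM_ACCESS, #1 counts SW_INCR, both primed near overflow */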
+ write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
+ write_regn_el0(pmevtyper, 1, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
+ write_sysreg_s(0x3, PMCNTENSET_EL0);
+ write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
+ write_regn_el0(pmevcntr, 1, PRE_OVERFLOW);
+ isb();
+
+ /* overflow interrupts are still disabled (PMINTENSET_EL1 is clear) */
+
+ mem_access_loop(addr, 200, pmu.pmcr_ro | PMU_PMCR_E);
+ report(expect_interrupts(0), "no overflow interrupt after preset");
+
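+ /* 100 SW_INCR writes overflow counter #1, but its interrupt is still masked */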
+ set_pmcr(pmu.pmcr_ro | PMU_PMCR_E);
+ for (i = 0; i < 100; i++)
+ write_sysreg(0x2, pmswinc_el0);
+
+ set_pmcr(pmu.pmcr_ro);
+ report(expect_interrupts(0), "no overflow interrupt after counting");
+
+ /* enable overflow interrupts on all counters */
+
+ pmu_reset_stats();
+
+ write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
+ write_regn_el0(pmevcntr, 1, PRE_OVERFLOW);
+ write_sysreg(ALL_SET, pmintenset_el1);
+ isb();
+
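+ /* overflow both counters: MEM_ACCESS on #0, SW_INCR on #1 */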
+ mem_access_loop(addr, 200, pmu.pmcr_ro | PMU_PMCR_E);
+ for (i = 0; i < 100; i++)
+ write_sysreg(0x3, pmswinc_el0);
+
+ mem_access_loop(addr, 200, pmu.pmcr_ro);
+ report_info("overflow=0x%lx", read_sysreg(pmovsclr_el0));
+ report(expect_interrupts(0x3),
+ "overflow interrupts expected on #0 and #1");
+
+ /* promote counter #0 to a 64-bit counter by chaining counter #1 (CHAIN event) */
+
+ pmu_reset_stats();
+
+ write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
+ write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
+ isb();
+ mem_access_loop(addr, 200, pmu.pmcr_ro | PMU_PMCR_E);
+ report(expect_interrupts(0),
+ "no overflow interrupt expected on 32b boundary");
+
+ /* overflow on the odd counter (#1) of the chained pair */
+ pmu_reset_stats();
+ write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
+ write_regn_el0(pmevcntr, 1, ALL_SET);
+ isb();
+ mem_access_loop(addr, 400, pmu.pmcr_ro | PMU_PMCR_E);
+ report(expect_interrupts(0x2),
+ "expect overflow interrupt on odd counter");
+}
#endif
/*
@@ -931,6 +1066,10 @@ int main(int argc, char *argv[])
report_prefix_push(argv[1]);
test_chain_promotion();
report_prefix_pop();
+ } else if (strcmp(argv[1], "pmu-overflow-interrupt") == 0) {
+ report_prefix_push(argv[1]);
+ test_overflow_interrupt();
+ report_prefix_pop();
} else {
report_abort("Unknown sub-test '%s'", argv[1]);
}
@@ -114,6 +114,12 @@ groups = pmu
arch = arm64
extra_params = -append 'pmu-chain-promotion'
+[pmu-overflow-interrupt]
+file = pmu.flat
+groups = pmu
+arch = arm64
+extra_params = -append 'pmu-overflow-interrupt'
+
# Test PMU support (TCG) with -icount IPC=1
#[pmu-tcg-icount-1]
#file = pmu.flat
Test overflows for MEM_ACCESS and SW_INCR events. Also tests overflows
with 64-bit events.

Signed-off-by: Eric Auger <eric.auger@redhat.com>

---
v3 -> v4:
- all report messages are different

v2 -> v3:
- added prefix pop
- added pmu_stats.unexpected
- added pmu- prefix
- remove traces in irq_handler()

v1 -> v2:
- inline setup_irq() code
---
 arm/pmu.c         | 139 ++++++++++++++++++++++++++++++++++++++++++++++
 arm/unittests.cfg |   6 ++
 2 files changed, 145 insertions(+)