Supporting guest mode events will necessitate programming two event
filters. Prepare by splitting up the programming of the event selector +
event filter into separate helpers.

Opportunistically replace RMW patterns with sysreg_clear_set_s().

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
 arch/arm64/include/asm/apple_m1_pmu.h |  1 +
 drivers/perf/apple_m1_cpu_pmu.c       | 52 ++++++++++++++++-----------
 2 files changed, 33 insertions(+), 20 deletions(-)

diff --git a/arch/arm64/include/asm/apple_m1_pmu.h b/arch/arm64/include/asm/apple_m1_pmu.h
--- a/arch/arm64/include/asm/apple_m1_pmu.h
+++ b/arch/arm64/include/asm/apple_m1_pmu.h
@@ -37,6 +37,7 @@
#define PMCR0_PMI_ENABLE_8_9 GENMASK(45, 44)

#define SYS_IMP_APL_PMCR1_EL1 sys_reg(3, 1, 15, 1, 0)
+#define SYS_IMP_APL_PMCR1_EL12 sys_reg(3, 1, 15, 7, 2)
#define PMCR1_COUNT_A64_EL0_0_7 GENMASK(15, 8)
#define PMCR1_COUNT_A64_EL1_0_7 GENMASK(23, 16)
#define PMCR1_COUNT_A64_EL0_8_9 GENMASK(41, 40)

diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c
--- a/drivers/perf/apple_m1_cpu_pmu.c
+++ b/drivers/perf/apple_m1_cpu_pmu.c
@@ -325,11 +325,10 @@ static void m1_pmu_disable_counter_interrupt(unsigned int index)
__m1_pmu_enable_counter_interrupt(index, false);
}

-static void m1_pmu_configure_counter(unsigned int index, u8 event,
- bool user, bool kernel)
+static void __m1_pmu_configure_event_filter(unsigned int index, bool user,
+ bool kernel)
{
- u64 val, user_bit, kernel_bit;
- int shift;
+ u64 clear, set, user_bit, kernel_bit;

switch (index) {
case 0 ... 7:
@@ -344,19 +343,24 @@ static void m1_pmu_configure_counter(unsigned int index, u8 event,
BUG();
}

- val = read_sysreg_s(SYS_IMP_APL_PMCR1_EL1);
-
+ clear = set = 0;
if (user)
- val |= user_bit;
+ set |= user_bit;
else
- val &= ~user_bit;
+ clear |= user_bit;

if (kernel)
- val |= kernel_bit;
+ set |= kernel_bit;
else
- val &= ~kernel_bit;
+ clear |= kernel_bit;

- write_sysreg_s(val, SYS_IMP_APL_PMCR1_EL1);
+ sysreg_clear_set_s(SYS_IMP_APL_PMCR1_EL1, clear, set);
+}
+
+static void __m1_pmu_configure_eventsel(unsigned int index, u8 event)
+{
+ u64 clear = 0, set = 0;
+ int shift;

/*
* Counters 0 and 1 have fixed events. For anything else,
@@ -369,21 +373,29 @@ static void m1_pmu_configure_counter(unsigned int index, u8 event,
case 0 ... 1:
/* Fixed events */
break;
case 2 ... 5:
shift = (index - 2) * 8;
- val = read_sysreg_s(SYS_IMP_APL_PMESR0_EL1);
- val &= ~((u64)0xff << shift);
- val |= (u64)event << shift;
- write_sysreg_s(val, SYS_IMP_APL_PMESR0_EL1);
+ clear |= (u64)0xff << shift;
+ set |= (u64)event << shift;
+ sysreg_clear_set_s(SYS_IMP_APL_PMESR0_EL1, clear, set);
break;
case 6 ... 9:
shift = (index - 6) * 8;
- val = read_sysreg_s(SYS_IMP_APL_PMESR1_EL1);
- val &= ~((u64)0xff << shift);
- val |= (u64)event << shift;
- write_sysreg_s(val, SYS_IMP_APL_PMESR1_EL1);
+ clear |= (u64)0xff << shift;
+ set |= (u64)event << shift;
+ sysreg_clear_set_s(SYS_IMP_APL_PMESR1_EL1, clear, set);
break;
}
}

+static void m1_pmu_configure_counter(unsigned int index, unsigned long config_base)
+{
+ bool kernel = config_base & M1_PMU_CFG_COUNT_KERNEL;
+ bool user = config_base & M1_PMU_CFG_COUNT_USER;
+ u8 evt = config_base & M1_PMU_CFG_EVENT;
+
+ __m1_pmu_configure_event_filter(index, user, kernel);
+ __m1_pmu_configure_eventsel(index, evt);
+}
+
/* arm_pmu backend */
static void m1_pmu_enable_event(struct perf_event *event)
{
@@ -398,7 +410,7 @@ static void m1_pmu_enable_event(struct perf_event *event)
m1_pmu_disable_counter(event->hw.idx);
isb();

- m1_pmu_configure_counter(event->hw.idx, evt, user, kernel);
+ m1_pmu_configure_counter(event->hw.idx, event->hw.config_base);
m1_pmu_enable_counter(event->hw.idx);
m1_pmu_enable_counter_interrupt(event->hw.idx);
isb();
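
For context, sysreg_clear_set_s() folds the open-coded read/modify/write
sequence into a single clear+set call. A rough sketch of the pattern
(modeled on the macro in arch/arm64/include/asm/sysreg.h, renamed here to
make clear it is only an illustration):

	/* Illustration only: approximates sysreg_clear_set_s(). */
	#define example_clear_set_s(sysreg, clear, set) do {		\
		u64 __old = read_sysreg_s(sysreg);			\
		u64 __new = (__old & ~(u64)(clear)) | (u64)(set);	\
		/* Skip the register write when nothing changed. */	\
		if (__new != __old)					\
			write_sysreg_s(__new, sysreg);			\
	} while (0)

Beyond brevity, the clear/set form makes the final register state
independent of whatever a previous configuration left behind: every bit
the helper owns ends up in exactly one of the two masks.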
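
To put concrete numbers on the eventsel math, here is a hypothetical
helper (not part of the patch) computing the masks applied to PMESR0_EL1
for counters 2-5:

	/*
	 * Hypothetical illustration: counter 3 with event 0x2a gives
	 * shift = 8, *clear = 0xff00, *set = 0x2a00, i.e. the event
	 * number lands in bits [15:8] of PMESR0_EL1.
	 */
	static void pmesr0_masks(unsigned int index, u8 event,
				 u64 *clear, u64 *set)
	{
		int shift = (index - 2) * 8;

		*clear = (u64)0xff << shift;
		*set = (u64)event << shift;
	}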