Perf event backend for ARMv8 and ARMv7 no longer uses the pmu_lock.
The only remaining user is the ARMv6 event backend.

Move the pmu_lock out of the generic arm_pmu driver into the ARMv6
code.

Signed-off-by: Julien Thierry <julien.thierry@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Russell King <linux@armlinux.org.uk>
---
 arch/arm/kernel/perf_event_v6.c | 24 +++++++++++++++---------
 drivers/perf/arm_pmu.c          |  1 -
 include/linux/perf/arm_pmu.h    |  5 -----
 3 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -69,6 +69,12 @@ enum armv6_counters {
};
/*
+ * Hardware lock to serialize accesses to PMU registers. Needed for the
+ * read/modify/write sequences.
+ */
+static DEFINE_PER_CPU(raw_spinlock_t, pmu_lock) = __RAW_SPIN_LOCK_UNLOCKED(pmu_lock);
+
+/*
* The hardware events that we support. We do support cache operations but
* we have harvard caches and no way to combine instruction and data
* accesses/misses in hardware.
@@ -271,7 +277,7 @@ static void armv6pmu_enable_event(struct perf_event *event)
unsigned long val, mask, evt, flags;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+	raw_spinlock_t *lock = this_cpu_ptr(&pmu_lock);
int idx = hwc->idx;
if (ARMV6_CYCLE_COUNTER == idx) {
@@ -294,12 +300,12 @@ static void armv6pmu_enable_event(struct perf_event *event)
* Mask out the current event and set the counter to count the event
* that we're interested in.
*/
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}
static irqreturn_t
@@ -363,25 +369,25 @@ static void armv6pmu_enable_event(struct perf_event *event)
static void armv6pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+ raw_spinlock_t *lock = this_cpu_ptr(&pmu_lock);
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = armv6_pmcr_read();
val |= ARMV6_PMCR_ENABLE;
armv6_pmcr_write(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}
static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+ raw_spinlock_t *lock = this_cpu_ptr(&pmu_lock);
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = armv6_pmcr_read();
val &= ~ARMV6_PMCR_ENABLE;
armv6_pmcr_write(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}
static int
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -822,7 +822,6 @@ static struct arm_pmu *__armpmu_alloc(gfp_t flags)
struct pmu_hw_events *events;
events = per_cpu_ptr(pmu->hw_events, cpu);
- raw_spin_lock_init(&events->pmu_lock);
events->percpu_pmu = pmu;
}
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -58,11 +58,6 @@ struct pmu_hw_events {
*/
DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
- /*
- * Hardware lock to serialize accesses to PMU registers. Needed for the
- * read/modify/write sequences.
- */
- raw_spinlock_t pmu_lock;
/*
* When using percpu IRQs, we need a percpu dev_id. Place it here as we
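
For anyone reviewing the locking change itself: all the lock protects
is the read/modify/write sequence on the PMCR. Below is a minimal,
standalone user-space sketch of that pattern, assuming a pthread mutex
in place of the per-cpu raw spinlock and a plain variable (fake_pmcr)
standing in for the coprocessor register; enable_event() here is an
illustrative analogue of the locked section of armv6pmu_enable_event(),
not the kernel code:

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the ARMv6 PMCR; the real register is read and written
 * through MRC/MCR coprocessor accessors. */
static unsigned long fake_pmcr;
static pthread_mutex_t fake_pmu_lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned long armv6_pmcr_read(void)
{
	return fake_pmcr;
}

static void armv6_pmcr_write(unsigned long val)
{
	fake_pmcr = val;
}

/* Analogue of the locked section of armv6pmu_enable_event(): the whole
 * read/modify/write must be serialized, otherwise a concurrent updater
 * can work from a stale value and one of the two writes is lost. */
static void enable_event(unsigned long mask, unsigned long evt)
{
	unsigned long val;

	pthread_mutex_lock(&fake_pmu_lock);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	pthread_mutex_unlock(&fake_pmu_lock);
}

int main(void)
{
	/* Hypothetical mask/event values, for illustration only. */
	enable_event(0xffUL << 20, 0x5UL << 20);
	printf("PMCR is now %#lx\n", fake_pmcr);
	return 0;
}

In the kernel the same sequence additionally runs under
raw_spin_lock_irqsave(), so the PMU interrupt handler on the same CPU
cannot interleave with it either; and making pmu_lock a statically
initialised per-cpu variable keeps the lock valid for every possible
CPU without needing an init call in armv6pmu_init().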
--
1.9.1