Message ID | 20200617113851.607706-7-alexandru.elisei@arm.com (mailing list archive) |
---|---|
State | New, archived |
Series | arm_pmu: Use NMI for perf interrupt |
Quoting Alexandru Elisei (2020-06-17 04:38:50)
> diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
> index df352b334ea7..17e5952d21e4 100644
> --- a/drivers/perf/arm_pmu.c
> +++ b/drivers/perf/arm_pmu.c
> @@ -26,8 +26,46 @@
>
>  #include <asm/irq_regs.h>
>
> +static int armpmu_count_irq_users(const int irq);
> +
> +struct pmu_irq_ops {
> +	void (*enable_pmuirq)(unsigned int irq);
> +	void (*disable_pmuirq)(unsigned int irq);
> +	void (*free_pmuirq)(unsigned int irq, int cpu, void __percpu *devid);

Does 'cpu' need to be signed?

> +};
> +
> +static void armpmu_free_pmuirq(unsigned int irq, int cpu, void __percpu *devid)
> +{
> +	free_irq(irq, per_cpu_ptr(devid, cpu));
> +}
> +
> +static const struct pmu_irq_ops pmuirq_ops = {
> +	.enable_pmuirq = enable_irq,
> +	.disable_pmuirq = disable_irq_nosync,
> +	.free_pmuirq = armpmu_free_pmuirq
> +};
> +
> +static void armpmu_enable_percpu_pmuirq(unsigned int irq)
> +{
> +	enable_percpu_irq(irq, IRQ_TYPE_NONE);
> +}
> +
> +static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
> +				      void __percpu *devid)
> +{
> +	if (armpmu_count_irq_users(irq) == 1)
> +		free_percpu_irq(irq, devid);
> +}
> +
> +static const struct pmu_irq_ops percpu_pmuirq_ops = {
> +	.enable_pmuirq = armpmu_enable_percpu_pmuirq,
> +	.disable_pmuirq = disable_percpu_irq,
> +	.free_pmuirq = armpmu_free_percpu_pmuirq
> +};
> +
>  static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
>  static DEFINE_PER_CPU(int, cpu_irq);

Same question as above.

> +static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops);
>
>  static inline u64 arm_pmu_event_max_period(struct perf_event *event)
>  {
Hi,

On 6/17/20 9:23 PM, Stephen Boyd wrote:
> Quoting Alexandru Elisei (2020-06-17 04:38:50)
>> diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
>> index df352b334ea7..17e5952d21e4 100644
>> --- a/drivers/perf/arm_pmu.c
>> +++ b/drivers/perf/arm_pmu.c
>> @@ -26,8 +26,46 @@
>>
>>  #include <asm/irq_regs.h>
>>
>> +static int armpmu_count_irq_users(const int irq);
>> +
>> +struct pmu_irq_ops {
>> +	void (*enable_pmuirq)(unsigned int irq);
>> +	void (*disable_pmuirq)(unsigned int irq);
>> +	void (*free_pmuirq)(unsigned int irq, int cpu, void __percpu *devid);
> Does 'cpu' need to be signed?

I'm not sure what you mean. The cpu argument comes from
drivers/perf/arm_pmu_platform.c::armpmu_free_irqs -> armpmu_free_irq, where it is
the iterator variable used by the for_each_cpu macro. The documentation for the
macro states:

/**
 * for_each_cpu - iterate over every cpu in a mask
 * @cpu: the (optionally unsigned) integer iterator
             ^^^^^^^^^^^^^^^^^^^^^

I could write a patch to convert it to an unsigned int, but that seems like
unnecessary churn to me.

>> +};
>> +
>> +static void armpmu_free_pmuirq(unsigned int irq, int cpu, void __percpu *devid)
>> +{
>> +	free_irq(irq, per_cpu_ptr(devid, cpu));
>> +}
>> +
>> +static const struct pmu_irq_ops pmuirq_ops = {
>> +	.enable_pmuirq = enable_irq,
>> +	.disable_pmuirq = disable_irq_nosync,
>> +	.free_pmuirq = armpmu_free_pmuirq
>> +};
>> +
>> +static void armpmu_enable_percpu_pmuirq(unsigned int irq)
>> +{
>> +	enable_percpu_irq(irq, IRQ_TYPE_NONE);
>> +}
>> +
>> +static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
>> +				      void __percpu *devid)
>> +{
>> +	if (armpmu_count_irq_users(irq) == 1)
>> +		free_percpu_irq(irq, devid);
>> +}
>> +
>> +static const struct pmu_irq_ops percpu_pmuirq_ops = {
>> +	.enable_pmuirq = armpmu_enable_percpu_pmuirq,
>> +	.disable_pmuirq = disable_percpu_irq,
>> +	.free_pmuirq = armpmu_free_percpu_pmuirq
>> +};
>> +
>>  static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
>>  static DEFINE_PER_CPU(int, cpu_irq);
> Same question as above.

Same situation as above - cpu is the iterator variable for for_each_cpu.

Thanks,
Alex
Quoting Alexandru Elisei (2020-06-18 03:51:24)
> Hi,
>
> On 6/17/20 9:23 PM, Stephen Boyd wrote:
> > Quoting Alexandru Elisei (2020-06-17 04:38:50)
> >> diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
> >> index df352b334ea7..17e5952d21e4 100644
> >> --- a/drivers/perf/arm_pmu.c
> >> +++ b/drivers/perf/arm_pmu.c
> >> @@ -26,8 +26,46 @@
> >>
> >>  #include <asm/irq_regs.h>
> >>
> >> +static int armpmu_count_irq_users(const int irq);
> >> +
> >> +struct pmu_irq_ops {
> >> +	void (*enable_pmuirq)(unsigned int irq);
> >> +	void (*disable_pmuirq)(unsigned int irq);
> >> +	void (*free_pmuirq)(unsigned int irq, int cpu, void __percpu *devid);
> > Does 'cpu' need to be signed?
>
> I'm not sure what you mean. The cpu argument comes from
> drivers/perf/arm_pmu_platform.c::armpmu_free_irqs -> armpmu_free_irq, where it is
> the iterator variable used by the for_each_cpu macro. The documentation for the
> macro states:
>
> /**
>  * for_each_cpu - iterate over every cpu in a mask
>  * @cpu: the (optionally unsigned) integer iterator
>              ^^^^^^^^^^^^^^^^^^^^^
>
> I could write a patch to convert it to an unsigned int, but that seems like
> unnecessary churn to me.

Ok. It would be nice to make it unsigned in the arm_pmu_platform.c file. Not
required for this patch series.
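For reference, below is a paraphrased sketch of the call site being discussed in
drivers/perf/arm_pmu_platform.c. It is reconstructed from memory rather than
quoted verbatim, so details may differ slightly from the tree; the point it
illustrates is that the 'cpu' handed to armpmu_free_irq() is simply the
for_each_cpu() iterator, conventionally declared as a plain int.

/* Paraphrased sketch, not a verbatim quote of arm_pmu_platform.c. */
static void armpmu_free_irqs(struct arm_pmu *armpmu)
{
	int cpu;	/* for_each_cpu() accepts a signed or an unsigned iterator */
	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;

	for_each_cpu(cpu, &armpmu->supported_cpus)
		armpmu_free_irq(per_cpu(hw_events->irq, cpu), cpu);
}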
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index df352b334ea7..17e5952d21e4 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -26,8 +26,46 @@
 
 #include <asm/irq_regs.h>
 
+static int armpmu_count_irq_users(const int irq);
+
+struct pmu_irq_ops {
+	void (*enable_pmuirq)(unsigned int irq);
+	void (*disable_pmuirq)(unsigned int irq);
+	void (*free_pmuirq)(unsigned int irq, int cpu, void __percpu *devid);
+};
+
+static void armpmu_free_pmuirq(unsigned int irq, int cpu, void __percpu *devid)
+{
+	free_irq(irq, per_cpu_ptr(devid, cpu));
+}
+
+static const struct pmu_irq_ops pmuirq_ops = {
+	.enable_pmuirq = enable_irq,
+	.disable_pmuirq = disable_irq_nosync,
+	.free_pmuirq = armpmu_free_pmuirq
+};
+
+static void armpmu_enable_percpu_pmuirq(unsigned int irq)
+{
+	enable_percpu_irq(irq, IRQ_TYPE_NONE);
+}
+
+static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
+				      void __percpu *devid)
+{
+	if (armpmu_count_irq_users(irq) == 1)
+		free_percpu_irq(irq, devid);
+}
+
+static const struct pmu_irq_ops percpu_pmuirq_ops = {
+	.enable_pmuirq = armpmu_enable_percpu_pmuirq,
+	.disable_pmuirq = disable_percpu_irq,
+	.free_pmuirq = armpmu_free_percpu_pmuirq
+};
+
 static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
 static DEFINE_PER_CPU(int, cpu_irq);
+static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops);
 
 static inline u64 arm_pmu_event_max_period(struct perf_event *event)
 {
@@ -544,6 +582,19 @@ static int armpmu_count_irq_users(const int irq)
 	return count;
 }
 
+static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(cpu_irq, cpu) == irq
+		    && per_cpu(cpu_irq_ops, cpu))
+			return per_cpu(cpu_irq_ops, cpu);
+	}
+
+	return NULL;
+}
+
 void armpmu_free_irq(int irq, int cpu)
 {
 	if (per_cpu(cpu_irq, cpu) == 0)
@@ -551,18 +602,18 @@ void armpmu_free_irq(int irq, int cpu)
 	if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
 		return;
 
-	if (!irq_is_percpu_devid(irq))
-		free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu));
-	else if (armpmu_count_irq_users(irq) == 1)
-		free_percpu_irq(irq, &cpu_armpmu);
+	per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu);
 
 	per_cpu(cpu_irq, cpu) = 0;
+	per_cpu(cpu_irq_ops, cpu) = NULL;
 }
 
 int armpmu_request_irq(int irq, int cpu)
 {
 	int err = 0;
 	const irq_handler_t handler = armpmu_dispatch_irq;
+	const struct pmu_irq_ops *irq_ops;
+
 	if (!irq)
 		return 0;
 
@@ -584,15 +635,26 @@ int armpmu_request_irq(int irq, int cpu)
 		irq_set_status_flags(irq, IRQ_NOAUTOEN);
 		err = request_irq(irq, handler, irq_flags, "arm-pmu",
 				  per_cpu_ptr(&cpu_armpmu, cpu));
+
+		irq_ops = &pmuirq_ops;
 	} else if (armpmu_count_irq_users(irq) == 0) {
 		err = request_percpu_irq(irq, handler, "arm-pmu",
 					 &cpu_armpmu);
+
+		irq_ops = &percpu_pmuirq_ops;
+	} else {
+		/* Per cpudevid irq was already requested by another CPU */
+		irq_ops = armpmu_find_irq_ops(irq);
+
+		if (WARN_ON(!irq_ops))
+			err = -EINVAL;
 	}
 
 	if (err)
 		goto err_out;
 
 	per_cpu(cpu_irq, cpu) = irq;
+	per_cpu(cpu_irq_ops, cpu) = irq_ops;
 	return 0;
 
 err_out:
@@ -625,12 +687,8 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
 	per_cpu(cpu_armpmu, cpu) = pmu;
 
 	irq = armpmu_get_cpu_irq(pmu, cpu);
-	if (irq) {
-		if (irq_is_percpu_devid(irq))
-			enable_percpu_irq(irq, IRQ_TYPE_NONE);
-		else
-			enable_irq(irq);
-	}
+	if (irq)
+		per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);
 
 	return 0;
 }
@@ -644,12 +702,8 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
 		return 0;
 
 	irq = armpmu_get_cpu_irq(pmu, cpu);
-	if (irq) {
-		if (irq_is_percpu_devid(irq))
-			disable_percpu_irq(irq);
-		else
-			disable_irq_nosync(irq);
-	}
+	if (irq)
+		per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);
 
 	per_cpu(cpu_armpmu, cpu) = NULL;
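The shape of the change, outside of diff context: the IRQ flavour is decided
once, at request time, by picking a const ops table, and the hotplug
enable/disable and free paths then dispatch through the stored per-CPU pointer
instead of re-checking irq_is_percpu_devid() everywhere. Below is a minimal,
self-contained userspace sketch of that pattern; the names are invented for
illustration and are not the kernel API.

#include <stdio.h>

/* Invented names; a userspace sketch of the ops-table pattern only. */
struct irq_ops {
	void (*enable)(unsigned int irq);
	void (*disable)(unsigned int irq);
};

static void plain_enable(unsigned int irq)   { printf("enable_irq(%u)\n", irq); }
static void plain_disable(unsigned int irq)  { printf("disable_irq_nosync(%u)\n", irq); }
static void percpu_enable(unsigned int irq)  { printf("enable_percpu_irq(%u)\n", irq); }
static void percpu_disable(unsigned int irq) { printf("disable_percpu_irq(%u)\n", irq); }

static const struct irq_ops plain_ops  = { plain_enable,  plain_disable  };
static const struct irq_ops percpu_ops = { percpu_enable, percpu_disable };

int main(void)
{
	/* "Request" time: decide once which ops apply to this IRQ. */
	int irq_is_percpu = 1;
	const struct irq_ops *ops = irq_is_percpu ? &percpu_ops : &plain_ops;
	unsigned int irq = 23;

	/* Hotplug-style paths just dispatch through the stored pointer. */
	ops->enable(irq);
	ops->disable(irq);
	return 0;
}

Keeping the chosen pointer per CPU, as the patch does with cpu_irq_ops, is
presumably what lets later patches in this series add NMI-capable variants
without touching the hotplug callbacks again.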