| Message ID | 20240813203409.3985398-1-andrii@kernel.org |
|---|---|
| State | Accepted |
| Commit | 10cdb82aa77f313dcfe947a17f7fc12c5affb38e |
| Series | [v3] uprobes: turn trace_uprobe's nhit counter to be per-CPU one |
On Tue, Aug 13, 2024 at 1:34 PM Andrii Nakryiko <andrii@kernel.org> wrote:
>
> trace_uprobe->nhit counter is not incremented atomically, so its value
> is questionable when uprobe is hit on multiple CPUs simultaneously.
>
> Also, doing this shared counter increment across many CPUs causes heavy
> cache line bouncing, limiting uprobe/uretprobe performance scaling with
> number of CPUs.
>
> Solve both problems by making this a per-CPU counter.
>
> Reviewed-by: Oleg Nesterov <oleg@redhat.com>
> Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> ---
>  kernel/trace/trace_uprobe.c | 24 +++++++++++++++++++++---
>  1 file changed, 21 insertions(+), 3 deletions(-)

Is there anything else I'm expected to do about this patch? If not, can
this please be applied? Thanks!

> [...]
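The two problems the commit message names are easy to see outside the kernel. The program below is an illustrative userspace analogue only, not code from the patch: thread-private "shards" stand in for per-CPU slots. A plain `counter++` from several threads both loses updates (the read-modify-write is not atomic) and bounces the counter's cache line between cores, while sharded counters summed at read time — the same shape as the patch's `for_each_possible_cpu()` loop — stay exact.

```c
/*
 * Userspace analogue of the bug and the fix -- not kernel code.
 * Writers touch only their own shard; readers sum all shards.
 */
#include <pthread.h>
#include <stdio.h>

#define NTHREADS 8
#define NITERS   1000000UL

static unsigned long shared_hits;	/* racy: concurrent ++ loses updates */

struct shard {
	unsigned long hits;
	char pad[64 - sizeof(unsigned long)];	/* one shard per cache line */
};
static struct shard shards[NTHREADS];

static void *worker(void *arg)
{
	long id = (long)arg;

	for (unsigned long i = 0; i < NITERS; i++) {
		shared_hits++;		/* non-atomic read-modify-write: the race */
		shards[id].hits++;	/* private slot: no race, no line bouncing */
	}
	return NULL;
}

int main(void)
{
	pthread_t tids[NTHREADS];
	unsigned long total = 0;

	for (long i = 0; i < NTHREADS; i++)
		pthread_create(&tids[i], NULL, worker, (void *)i);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(tids[i], NULL);

	for (int i = 0; i < NTHREADS; i++)
		total += shards[i].hits;	/* read side: sum the shards */

	printf("shared: %lu  sharded: %lu  expected: %lu\n",
	       shared_hits, total, NTHREADS * NITERS);
	return 0;
}
```

Built with `cc -pthread demo.c`, the shared total typically falls well short of 8000000 while the sharded total is exact; the lost updates are the correctness half of the problem, and the per-line padding addresses the cache-bouncing half.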
On Tue, Aug 13, 2024 at 01:34:09PM -0700, Andrii Nakryiko wrote:
> trace_uprobe->nhit counter is not incremented atomically, so its value
> is questionable when uprobe is hit on multiple CPUs simultaneously.
>
> Also, doing this shared counter increment across many CPUs causes heavy
> cache line bouncing, limiting uprobe/uretprobe performance scaling with
> number of CPUs.
>
> Solve both problems by making this a per-CPU counter.
>
> Reviewed-by: Oleg Nesterov <oleg@redhat.com>
> Signed-off-by: Andrii Nakryiko <andrii@kernel.org>

lgtm, fwiw

Reviewed-by: Jiri Olsa <jolsa@kernel.org>

jirka

> [...]
On Tue, 13 Aug 2024 13:34:09 -0700
Andrii Nakryiko <andrii@kernel.org> wrote:

> trace_uprobe->nhit counter is not incremented atomically, so its value
> is questionable when uprobe is hit on multiple CPUs simultaneously.
>
> Also, doing this shared counter increment across many CPUs causes heavy
> cache line bouncing, limiting uprobe/uretprobe performance scaling with
> number of CPUs.
>
> Solve both problems by making this a per-CPU counter.
>

Looks good to me. Let me pick it to linux-trace probes/for-next.

Thank you,

> Reviewed-by: Oleg Nesterov <oleg@redhat.com>
> Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> [...]
On Tue, Aug 27, 2024 at 8:55 PM Masami Hiramatsu <mhiramat@kernel.org> wrote:
>
> On Tue, 13 Aug 2024 13:34:09 -0700
> Andrii Nakryiko <andrii@kernel.org> wrote:
>
> > trace_uprobe->nhit counter is not incremented atomically, so its value
> > is questionable when uprobe is hit on multiple CPUs simultaneously.
> >
> > Also, doing this shared counter increment across many CPUs causes heavy
> > cache line bouncing, limiting uprobe/uretprobe performance scaling with
> > number of CPUs.
> >
> > Solve both problems by making this a per-CPU counter.
> >
>
> Looks good to me. Let me pick it to linux-trace probes/for-next.
>

Thanks! I just checked linux-trace repo, doesn't seem like this was
applied yet, is that right? Or am I checking in the wrong place?

> Thank you,
>
> > Reviewed-by: Oleg Nesterov <oleg@redhat.com>
> > Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> > [...]
>
> --
> Masami Hiramatsu (Google) <mhiramat@kernel.org>
On Thu, 29 Aug 2024 10:28:24 -0700
Andrii Nakryiko <andrii.nakryiko@gmail.com> wrote:

> On Tue, Aug 27, 2024 at 8:55 PM Masami Hiramatsu <mhiramat@kernel.org> wrote:
> >
> > On Tue, 13 Aug 2024 13:34:09 -0700
> > Andrii Nakryiko <andrii@kernel.org> wrote:
> >
> > > trace_uprobe->nhit counter is not incremented atomically, so its value
> > > is questionable when uprobe is hit on multiple CPUs simultaneously.
> > >
> > > Also, doing this shared counter increment across many CPUs causes heavy
> > > cache line bouncing, limiting uprobe/uretprobe performance scaling with
> > > number of CPUs.
> > >
> > > Solve both problems by making this a per-CPU counter.
> > >
> >
> > Looks good to me. Let me pick it to linux-trace probes/for-next.
> >
>
> Thanks! I just checked linux-trace repo, doesn't seem like this was
> applied yet, is that right? Or am I checking in the wrong place?

Sorry, I forgot to push probes/for-next. Let me push it.

Thank you,

> > > Reviewed-by: Oleg Nesterov <oleg@redhat.com>
> > > Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> > > [...]
```diff
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index c98e3b3386ba..c3df411a2684 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -17,6 +17,7 @@
 #include <linux/string.h>
 #include <linux/rculist.h>
 #include <linux/filter.h>
+#include <linux/percpu.h>
 
 #include "trace_dynevent.h"
 #include "trace_probe.h"
@@ -62,7 +63,7 @@ struct trace_uprobe {
 	char *filename;
 	unsigned long offset;
 	unsigned long ref_ctr_offset;
-	unsigned long nhit;
+	unsigned long __percpu *nhits;
 	struct trace_probe tp;
 };
 
@@ -337,6 +338,12 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
 	if (!tu)
 		return ERR_PTR(-ENOMEM);
 
+	tu->nhits = alloc_percpu(unsigned long);
+	if (!tu->nhits) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
 	ret = trace_probe_init(&tu->tp, event, group, true, nargs);
 	if (ret < 0)
 		goto error;
@@ -349,6 +356,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
 	return tu;
 
 error:
+	free_percpu(tu->nhits);
 	kfree(tu);
 
 	return ERR_PTR(ret);
@@ -362,6 +370,7 @@ static void free_trace_uprobe(struct trace_uprobe *tu)
 	path_put(&tu->path);
 	trace_probe_cleanup(&tu->tp);
 	kfree(tu->filename);
+	free_percpu(tu->nhits);
 	kfree(tu);
 }
 
@@ -815,13 +824,21 @@ static int probes_profile_seq_show(struct seq_file *m, void *v)
 {
 	struct dyn_event *ev = v;
 	struct trace_uprobe *tu;
+	unsigned long nhits;
+	int cpu;
 
 	if (!is_trace_uprobe(ev))
 		return 0;
 
 	tu = to_trace_uprobe(ev);
+
+	nhits = 0;
+	for_each_possible_cpu(cpu) {
+		nhits += per_cpu(*tu->nhits, cpu);
+	}
+
 	seq_printf(m, " %s %-44s %15lu\n", tu->filename,
-			trace_probe_name(&tu->tp), tu->nhit);
+			trace_probe_name(&tu->tp), nhits);
 	return 0;
 }
 
@@ -1512,7 +1529,8 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
 	int ret = 0;
 
 	tu = container_of(con, struct trace_uprobe, consumer);
-	tu->nhit++;
+
+	this_cpu_inc(*tu->nhits);
 
 	udd.tu = tu;
 	udd.bp_addr = instruction_pointer(regs);
```
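Stripped of the trace_uprobe plumbing, the pattern the patch lands is compact. Below is a kernel-style sketch using the same per-CPU APIs the diff uses (`alloc_percpu()`, `this_cpu_inc()`, `per_cpu()`, `for_each_possible_cpu()`, `free_percpu()`); the `counter_*` wrapper names are invented for illustration and are not functions from the patch:

```c
/* Kernel-style sketch of the per-CPU counter pattern (illustration only;
 * the counter_* names are invented, the percpu APIs are the real ones). */
#include <linux/percpu.h>
#include <linux/errno.h>

static unsigned long __percpu *nhits;

static int counter_init(void)		/* process context; may sleep */
{
	nhits = alloc_percpu(unsigned long);	/* one slot per possible CPU */
	return nhits ? 0 : -ENOMEM;
}

static void counter_hit(void)		/* hot path, cf. uprobe_dispatcher() */
{
	this_cpu_inc(*nhits);	/* preemption-safe; touches only the local slot */
}

static unsigned long counter_read(void)	/* cold path, cf. the seq_show */
{
	unsigned long sum = 0;
	int cpu;

	/*
	 * The sum is not an atomic snapshot while writers run, but no
	 * increment is ever lost -- good enough for a statistics counter.
	 */
	for_each_possible_cpu(cpu)
		sum += per_cpu(*nhits, cpu);
	return sum;
}

static void counter_exit(void)
{
	free_percpu(nhits);	/* like kfree(), NULL is a no-op */
}
```

That no-op-on-NULL behavior is also why the patch can jump to the shared `error:` label in alloc_trace_uprobe() straight from a failed `alloc_percpu()`: `tu` comes from kzalloc(), so `tu->nhits` is still NULL there and `free_percpu(tu->nhits)` is harmless. The read side pays an O(nr_cpu_ids) walk, but the uprobe_profile file is read rarely while probes fire constantly, so the change moves all of the cost off the hot path.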