Message ID | 20220518224725.742882-4-namhyung@kernel.org (mailing list archive)
---|---
State | RFC
Delegated to | BPF
Series | perf record: Implement off-cpu profiling with BPF (v3)
Context | Check | Description
---|---|---
bpf/vmtest-bpf-next-VM_Test-2 | success | Logs for Kernel LATEST on ubuntu-latest with llvm-15
bpf/vmtest-bpf-next-VM_Test-3 | fail | Logs for Kernel LATEST on z15 with gcc
bpf/vmtest-bpf-next-PR | fail | PR summary
bpf/vmtest-bpf-next-VM_Test-1 | success | Logs for Kernel LATEST on ubuntu-latest with gcc
netdev/tree_selection | success | Not a local patch
On Wed, May 18, 2022 at 3:47 PM Namhyung Kim <namhyung@kernel.org> wrote:
>
> It should honor cpu and task filtering with -a, -C or -p, -t options.
>
> Signed-off-by: Namhyung Kim <namhyung@kernel.org>
> ---
>  tools/perf/builtin-record.c            |  2 +-
>  tools/perf/util/bpf_off_cpu.c          | 78 +++++++++++++++++++++++---
>  tools/perf/util/bpf_skel/off_cpu.bpf.c | 52 +++++++++++++++--
>  tools/perf/util/off_cpu.h              |  6 +-
>  4 files changed, 123 insertions(+), 15 deletions(-)
>
[...]
>
> +	if (target->cpu_list) {
> +		u32 cpu;
> +		u8 val = 1;
> +
> +		skel->bss->has_cpu = 1;
> +		fd = bpf_map__fd(skel->maps.cpu_filter);
> +
> +		for (i = 0; i < ncpus; i++) {
> +			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
> +			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);

Perhaps more concise with a for_each:

perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.user_requested_cpus)
	bpf_map_update_elem(fd, &cpu.cpu, &val, BPF_ANY);

> +		}
> +	}
[...]
On Wed, May 18, 2022 at 9:02 PM Ian Rogers <irogers@google.com> wrote:
> On Wed, May 18, 2022 at 3:47 PM Namhyung Kim <namhyung@kernel.org> wrote:
> >
> > It should honor cpu and task filtering with -a, -C or -p, -t options.
> >
> > Signed-off-by: Namhyung Kim <namhyung@kernel.org>
[...]
> > +		for (i = 0; i < ncpus; i++) {
> > +			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
> > +			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
>
> Perhaps more concise with a for_each:
>
> perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.user_requested_cpus)
> 	bpf_map_update_elem(fd, &cpu.cpu, &val, BPF_ANY);

Will change.

Thanks,
Namhyung
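For reference, the suggested cleanup would look roughly like this in off_cpu_prepare(); a minimal sketch, assuming the perf_cpu_map__for_each_cpu() iterator from tools/lib/perf, which yields a struct perf_cpu and its index for each entry in the map:

	/* Sketch only: fill the cpu_filter map with the libperf iterator
	 * instead of an explicit index loop; 'cpu' is a struct perf_cpu,
	 * so the raw cpu number is cpu.cpu. */
	struct perf_cpu cpu;
	int idx, fd;
	u8 val = 1;

	skel->bss->has_cpu = 1;
	fd = bpf_map__fd(skel->maps.cpu_filter);

	perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.user_requested_cpus)
		bpf_map_update_elem(fd, &cpu.cpu, &val, BPF_ANY);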
Em Thu, May 19, 2022 at 02:02:28PM -0700, Namhyung Kim escreveu:
> On Wed, May 18, 2022 at 9:02 PM Ian Rogers <irogers@google.com> wrote:
> > On Wed, May 18, 2022 at 3:47 PM Namhyung Kim <namhyung@kernel.org> wrote:
> > >
> > > It should honor cpu and task filtering with -a, -C or -p, -t options.
[...]
> > Perhaps more concise with a for_each:
> >
> > perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.user_requested_cpus)
> > 	bpf_map_update_elem(fd, &cpu.cpu, &val, BPF_ANY);

So I'll wait for a new version of this patchset.

- Arnaldo
Em Wed, May 25, 2022 at 08:27:38AM -0300, Arnaldo Carvalho de Melo escreveu:
> Em Thu, May 19, 2022 at 02:02:28PM -0700, Namhyung Kim escreveu:
> > On Wed, May 18, 2022 at 9:02 PM Ian Rogers <irogers@google.com> wrote:
[...]
> > > Perhaps more concise with a for_each:
> > >
> > > perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.user_requested_cpus)
> > > 	bpf_map_update_elem(fd, &cpu.cpu, &val, BPF_ANY);
>
> So I'll wait for a new version of this patchset.

I take that back, will apply and this can be a follow up patch, right?

- Arnaldo
Em Wed, May 25, 2022 at 08:44:38AM -0300, Arnaldo Carvalho de Melo escreveu:
> Em Wed, May 25, 2022 at 08:27:38AM -0300, Arnaldo Carvalho de Melo escreveu:
> > Em Thu, May 19, 2022 at 02:02:28PM -0700, Namhyung Kim escreveu:
> > > On Wed, May 18, 2022 at 9:02 PM Ian Rogers <irogers@google.com> wrote:
> > > > Perhaps more concise with a for_each:
> > > > perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.user_requested_cpus)
> > > > 	bpf_map_update_elem(fd, &cpu.cpu, &val, BPF_ANY);
> > So I'll wait for a new version of this patchset.
> I take that back, will apply and this can be a follow up patch, right?

I tested it and added some committer notes, everything is now at my
tmp.perf/core branch and will transition to perf/core on its way to 5.19
later today, after tests finish.

- Arnaldo
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 91f88501412e..7f60d2eac0b4 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -907,7 +907,7 @@ static int record__config_text_poke(struct evlist *evlist)
 
 static int record__config_off_cpu(struct record *rec)
 {
-	return off_cpu_prepare(rec->evlist);
+	return off_cpu_prepare(rec->evlist, &rec->opts.target);
 }
 
 static bool record__kcore_readable(struct machine *machine)
diff --git a/tools/perf/util/bpf_off_cpu.c b/tools/perf/util/bpf_off_cpu.c
index 9ed7aca3f4ac..b5e2d038da50 100644
--- a/tools/perf/util/bpf_off_cpu.c
+++ b/tools/perf/util/bpf_off_cpu.c
@@ -6,6 +6,9 @@
 #include "util/off_cpu.h"
 #include "util/perf-hooks.h"
 #include "util/session.h"
+#include "util/target.h"
+#include "util/cpumap.h"
+#include "util/thread_map.h"
 #include <bpf/bpf.h>
 
 #include "bpf_skel/off_cpu.skel.h"
@@ -60,8 +63,23 @@ static int off_cpu_config(struct evlist *evlist)
 	return 0;
 }
 
-static void off_cpu_start(void *arg __maybe_unused)
+static void off_cpu_start(void *arg)
 {
+	struct evlist *evlist = arg;
+
+	/* update task filter for the given workload */
+	if (!skel->bss->has_cpu && !skel->bss->has_task &&
+	    perf_thread_map__pid(evlist->core.threads, 0) != -1) {
+		int fd;
+		u32 pid;
+		u8 val = 1;
+
+		skel->bss->has_task = 1;
+		fd = bpf_map__fd(skel->maps.task_filter);
+		pid = perf_thread_map__pid(evlist->core.threads, 0);
+		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
+	}
+
 	skel->bss->enabled = 1;
 }
 
@@ -71,31 +89,75 @@ static void off_cpu_finish(void *arg __maybe_unused)
 	off_cpu_bpf__destroy(skel);
 }
 
-int off_cpu_prepare(struct evlist *evlist)
+int off_cpu_prepare(struct evlist *evlist, struct target *target)
 {
-	int err;
+	int err, fd, i;
+	int ncpus = 1, ntasks = 1;
 
 	if (off_cpu_config(evlist) < 0) {
 		pr_err("Failed to config off-cpu BPF event\n");
 		return -1;
 	}
 
-	set_max_rlimit();
-
-	skel = off_cpu_bpf__open_and_load();
+	skel = off_cpu_bpf__open();
 	if (!skel) {
 		pr_err("Failed to open off-cpu BPF skeleton\n");
 		return -1;
 	}
 
+	/* don't need to set cpu filter for system-wide mode */
+	if (target->cpu_list) {
+		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
+		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
+	}
+
+	if (target__has_task(target)) {
+		ntasks = perf_thread_map__nr(evlist->core.threads);
+		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
+	}
+
+	set_max_rlimit();
+
+	err = off_cpu_bpf__load(skel);
+	if (err) {
+		pr_err("Failed to load off-cpu skeleton\n");
+		goto out;
+	}
+
+	if (target->cpu_list) {
+		u32 cpu;
+		u8 val = 1;
+
+		skel->bss->has_cpu = 1;
+		fd = bpf_map__fd(skel->maps.cpu_filter);
+
+		for (i = 0; i < ncpus; i++) {
+			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
+			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
+		}
+	}
+
+	if (target__has_task(target)) {
+		u32 pid;
+		u8 val = 1;
+
+		skel->bss->has_task = 1;
+		fd = bpf_map__fd(skel->maps.task_filter);
+
+		for (i = 0; i < ntasks; i++) {
+			pid = perf_thread_map__pid(evlist->core.threads, i);
+			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
+		}
+	}
+
 	err = off_cpu_bpf__attach(skel);
 	if (err) {
 		pr_err("Failed to attach off-cpu BPF skeleton\n");
 		goto out;
 	}
 
-	if (perf_hooks__set_hook("record_start", off_cpu_start, NULL) ||
-	    perf_hooks__set_hook("record_end", off_cpu_finish, NULL)) {
+	if (perf_hooks__set_hook("record_start", off_cpu_start, evlist) ||
+	    perf_hooks__set_hook("record_end", off_cpu_finish, evlist)) {
 		pr_err("Failed to attach off-cpu skeleton\n");
 		goto out;
 	}
diff --git a/tools/perf/util/bpf_skel/off_cpu.bpf.c b/tools/perf/util/bpf_skel/off_cpu.bpf.c
index 5173ed882fdf..78cdcc8ff863 100644
--- a/tools/perf/util/bpf_skel/off_cpu.bpf.c
+++ b/tools/perf/util/bpf_skel/off_cpu.bpf.c
@@ -49,12 +49,28 @@ struct {
 	__uint(max_entries, MAX_ENTRIES);
 } off_cpu SEC(".maps");
 
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(key_size, sizeof(__u32));
+	__uint(value_size, sizeof(__u8));
+	__uint(max_entries, 1);
+} cpu_filter SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(key_size, sizeof(__u32));
+	__uint(value_size, sizeof(__u8));
+	__uint(max_entries, 1);
+} task_filter SEC(".maps");
+
 /* old kernel task_struct definition */
 struct task_struct___old {
 	long state;
 } __attribute__((preserve_access_index));
 
 int enabled = 0;
+int has_cpu = 0;
+int has_task = 0;
 
 /*
  * Old kernel used to call it task_struct->state and now it's '__state'.
@@ -74,6 +90,37 @@ static inline int get_task_state(struct task_struct *t)
 	return BPF_CORE_READ(t_old, state);
 }
 
+static inline int can_record(struct task_struct *t, int state)
+{
+	/* kernel threads don't have user stack */
+	if (t->flags & PF_KTHREAD)
+		return 0;
+
+	if (state != TASK_INTERRUPTIBLE &&
+	    state != TASK_UNINTERRUPTIBLE)
+		return 0;
+
+	if (has_cpu) {
+		__u32 cpu = bpf_get_smp_processor_id();
+		__u8 *ok;
+
+		ok = bpf_map_lookup_elem(&cpu_filter, &cpu);
+		if (!ok)
+			return 0;
+	}
+
+	if (has_task) {
+		__u8 *ok;
+		__u32 pid = t->pid;
+
+		ok = bpf_map_lookup_elem(&task_filter, &pid);
+		if (!ok)
+			return 0;
+	}
+
+	return 1;
+}
+
 SEC("tp_btf/sched_switch")
 int on_switch(u64 *ctx)
 {
@@ -92,10 +139,7 @@ int on_switch(u64 *ctx)
 
 	ts = bpf_ktime_get_ns();
 
-	if (prev->flags & PF_KTHREAD)
-		goto next;
-	if (state != TASK_INTERRUPTIBLE &&
-	    state != TASK_UNINTERRUPTIBLE)
+	if (!can_record(prev, state))
 		goto next;
 
 	stack_id = bpf_get_stackid(ctx, &stacks,
diff --git a/tools/perf/util/off_cpu.h b/tools/perf/util/off_cpu.h
index 375d03c424ea..f47af0232e55 100644
--- a/tools/perf/util/off_cpu.h
+++ b/tools/perf/util/off_cpu.h
@@ -2,15 +2,17 @@
 #define PERF_UTIL_OFF_CPU_H
 
 struct evlist;
+struct target;
 struct perf_session;
 
 #define OFFCPU_EVENT "offcpu-time"
 
 #ifdef HAVE_BPF_SKEL
-int off_cpu_prepare(struct evlist *evlist);
+int off_cpu_prepare(struct evlist *evlist, struct target *target);
 int off_cpu_write(struct perf_session *session);
 #else
-static inline int off_cpu_prepare(struct evlist *evlist __maybe_unused)
+static inline int off_cpu_prepare(struct evlist *evlist __maybe_unused,
+				  struct target *target __maybe_unused)
 {
 	return -1;
 }
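One design note on the ordering above: bpf_map__set_max_entries() is only effective between skeleton open and load, which is why the patch splits off_cpu_bpf__open_and_load() into separate open and load steps before sizing and filling the filter maps. A minimal sketch of that lifecycle, using the skeleton and map names from the patch:

	skel = off_cpu_bpf__open();	/* BPF object parsed, maps not created yet */
	/* resize while the maps are still mutable */
	bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	err = off_cpu_bpf__load(skel);	/* maps created, program verified */
	/* now the maps have fds and can be filled */
	bpf_map_update_elem(bpf_map__fd(skel->maps.cpu_filter), &cpu, &val, BPF_ANY);
	err = off_cpu_bpf__attach(skel);	/* hook up tp_btf/sched_switch */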
It should honor cpu and task filtering with -a, -C or -p, -t options.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
 tools/perf/builtin-record.c            |  2 +-
 tools/perf/util/bpf_off_cpu.c          | 78 +++++++++++++++++++++++---
 tools/perf/util/bpf_skel/off_cpu.bpf.c | 52 +++++++++++++++--
 tools/perf/util/off_cpu.h              |  6 +-
 4 files changed, 123 insertions(+), 15 deletions(-)
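In use, the filters map onto the usual perf record target options; a hypothetical session, assuming the --off-cpu option this series adds (the workload name is illustrative):

	# CPU filtering (-C): cpu_filter is sized and filled before the BPF program is enabled
	perf record --off-cpu -C 0-3 -- sleep 5

	# Task filtering (-p): task_filter holds the target's threads
	perf record --off-cpu -p 1234

	# Forked workload: off_cpu_start() adds the child's pid to task_filter
	# at record start, once the pid is known
	perf record --off-cpu -- ./myworkload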