@@ -41,9 +41,20 @@ int lock_contention_prepare(struct lock_contention *con)
else
bpf_map__set_max_entries(skel->maps.task_data, 1);
- if (con->save_callstack)
- bpf_map__set_max_entries(skel->maps.stacks, con->map_nr_entries);
- else
+ if (con->save_callstack) {
+ bpf_map__set_max_entries(skel->maps.stacks,
+ con->map_nr_entries);
+ if (con->owner) {
+ bpf_map__set_value_size(skel->maps.owner_stacks_entries,
+ con->max_stack * sizeof(u64));
+ bpf_map__set_value_size(
+ skel->maps.contention_owner_stacks,
+ con->max_stack * sizeof(u64));
+ bpf_map__set_key_size(skel->maps.owner_lock_stat,
+ con->max_stack * sizeof(u64));
+ skel->rodata->max_stack = con->max_stack;
+ }
+ } else
bpf_map__set_max_entries(skel->maps.stacks, 1);
if (target__has_cpu(target)) {
@@ -19,13 +19,37 @@
#define LCB_F_PERCPU (1U << 4)
#define LCB_F_MUTEX (1U << 5)
-/* callstack storage */
+/* tmp buffer for owner callstack */
struct {
- __uint(type, BPF_MAP_TYPE_STACK_TRACE);
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u64));
+ __uint(max_entries, 1);
+} owner_stacks_entries SEC(".maps");
+
+/* a map for tracing lock address to owner data */
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(__u64)); // lock address
+ __uint(value_size, sizeof(cotd));
__uint(max_entries, MAX_ENTRIES);
-} stacks SEC(".maps");
+} contention_owner_tracing SEC(".maps");
+
+/* a map for tracing lock address to owner stacktrace */
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(__u64)); // lock address
+ __uint(value_size, sizeof(__u64)); // stacktrace
+ __uint(max_entries, MAX_ENTRIES);
+} contention_owner_stacks SEC(".maps");
+
+/* owner callstack to contention data storage */
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(__u64));
+ __uint(value_size, sizeof(struct contention_data));
+ __uint(max_entries, MAX_ENTRIES);
+} owner_lock_stat SEC(".maps");
/* maintain timestamp at the beginning of contention */
struct {
@@ -43,6 +67,14 @@ struct {
__uint(max_entries, 1);
} tstamp_cpu SEC(".maps");
+/* callstack storage */
+struct {
+ __uint(type, BPF_MAP_TYPE_STACK_TRACE);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u64));
+ __uint(max_entries, MAX_ENTRIES);
+} stacks SEC(".maps");
+
/* actual lock contention statistics */
struct {
__uint(type, BPF_MAP_TYPE_HASH);
@@ -126,6 +158,7 @@ const volatile int needs_callstack;
const volatile int stack_skip;
const volatile int lock_owner;
const volatile int use_cgroup_v2;
+const volatile int max_stack;
/* determine the key of lock stat */
const volatile int aggr_mode;
@@ -436,7 +469,6 @@ int contention_end(u64 *ctx)
return 0;
need_delete = true;
}
-
duration = bpf_ktime_get_ns() - pelem->timestamp;
if ((__s64)duration < 0) {
__sync_fetch_and_add(&time_fail, 1);
@@ -3,6 +3,12 @@
#ifndef UTIL_BPF_SKEL_LOCK_DATA_H
#define UTIL_BPF_SKEL_LOCK_DATA_H
+typedef struct contention_owner_tracing_data {
+ u32 pid; // Who has the lock.
+ u64 timestamp; // Timestamp at which the current owner acquired the lock while contention was ongoing.
+ u32 count; // How many waiters for this lock.
+} cotd;
+
struct tstamp_data {
u64 timestamp;
u64 lock;
Add a few BPF maps in order to trace the owner callstack. Signed-off-by: Chun-Tse Shao <ctshao@google.com> --- tools/perf/util/bpf_lock_contention.c | 17 ++++++-- .../perf/util/bpf_skel/lock_contention.bpf.c | 40 +++++++++++++++++-- tools/perf/util/bpf_skel/lock_data.h | 6 +++ 3 files changed, 56 insertions(+), 7 deletions(-)