| Message ID | 20240115092727.888096-1-elver@google.com (mailing list archive) |
|---|---|
| State | New |
| Series | [RFC,1/2] stackdepot: add stats counters exported via debugfs |
On Mon, Jan 15, 2024 at 10:27 AM Marco Elver <elver@google.com> wrote:
>
> Add a few basic stats counters for stack depot that can be used to derive if
> stack depot is working as intended. This is a snapshot of the new stats after
> booting a system with a KASAN-enabled kernel:
>
> $ cat /sys/kernel/debug/stackdepot/stats
> pools: 838
> allocations: 29861
> frees: 6561
> in_use: 23300
> freelist_size: 1840
>
> Generally, "pools" should be well below the max; once the system is booted,
> "in_use" should remain relatively steady.
>
> Signed-off-by: Marco Elver <elver@google.com>
> ---
>  lib/stackdepot.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 53 insertions(+)
>
> diff --git a/lib/stackdepot.c b/lib/stackdepot.c
> index a0be5d05c7f0..80a8ca24ccc8 100644
> --- a/lib/stackdepot.c
> +++ b/lib/stackdepot.c
> @@ -14,6 +14,7 @@
>
>  #define pr_fmt(fmt) "stackdepot: " fmt
>
> +#include <linux/debugfs.h>
>  #include <linux/gfp.h>
>  #include <linux/jhash.h>
>  #include <linux/kernel.h>
> @@ -115,6 +116,23 @@ static bool new_pool_required = true;
>  /* Lock that protects the variables above. */
>  static DEFINE_RWLOCK(pool_rwlock);
>
> +/* Statistics counters for debugfs. */
> +enum depot_counter_id {
> +	DEPOT_COUNTER_ALLOCS,
> +	DEPOT_COUNTER_FREES,
> +	DEPOT_COUNTER_INUSE,
> +	DEPOT_COUNTER_FREELIST_SIZE,
> +	DEPOT_COUNTER_COUNT,
> +};
> +static long counters[DEPOT_COUNTER_COUNT];
> +static const char *const counter_names[] = {
> +	[DEPOT_COUNTER_ALLOCS] = "allocations",
> +	[DEPOT_COUNTER_FREES] = "frees",
> +	[DEPOT_COUNTER_INUSE] = "in_use",
> +	[DEPOT_COUNTER_FREELIST_SIZE] = "freelist_size",
> +};
> +static_assert(ARRAY_SIZE(counter_names) == DEPOT_COUNTER_COUNT);
> +
>  static int __init disable_stack_depot(char *str)
>  {
>  	return kstrtobool(str, &stack_depot_disabled);
> @@ -277,6 +295,7 @@ static void depot_init_pool(void *pool)
>  		stack->handle.extra = 0;
>
>  		list_add(&stack->list, &free_stacks);
> +		counters[DEPOT_COUNTER_FREELIST_SIZE]++;
>  	}
>
>  	/* Save reference to the pool to be used by depot_fetch_stack(). */
> @@ -376,6 +395,7 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
>  	/* Get and unlink the first entry from the freelist. */
>  	stack = list_first_entry(&free_stacks, struct stack_record, list);
>  	list_del(&stack->list);
> +	counters[DEPOT_COUNTER_FREELIST_SIZE]--;
>
>  	/* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
>  	if (size > CONFIG_STACKDEPOT_MAX_FRAMES)
> @@ -394,6 +414,8 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
>  	 */
>  	kmsan_unpoison_memory(stack, DEPOT_STACK_RECORD_SIZE);
>
> +	counters[DEPOT_COUNTER_ALLOCS]++;
> +	counters[DEPOT_COUNTER_INUSE]++;
>  	return stack;
>  }
>
> @@ -426,6 +448,10 @@ static void depot_free_stack(struct stack_record *stack)
>  	lockdep_assert_held_write(&pool_rwlock);
>
>  	list_add(&stack->list, &free_stacks);
> +
> +	counters[DEPOT_COUNTER_FREELIST_SIZE]++;
> +	counters[DEPOT_COUNTER_FREES]++;
> +	counters[DEPOT_COUNTER_INUSE]--;
>  }
>
>  /* Calculates the hash for a stack. */
> @@ -690,3 +716,30 @@ unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
>  	return parts.extra;
>  }
>  EXPORT_SYMBOL(stack_depot_get_extra_bits);
> +
> +static int stats_show(struct seq_file *seq, void *v)
> +{
> +	/*
> +	 * data race ok: These are just statistics counters, and approximate
> +	 * statistics are ok for debugging.
> +	 */
> +	seq_printf(seq, "pools: %d\n", data_race(pools_num));
> +	for (int i = 0; i < DEPOT_COUNTER_COUNT; i++)
> +		seq_printf(seq, "%s: %ld\n", counter_names[i], data_race(counters[i]));
> +
> +	return 0;
> +}
> +DEFINE_SHOW_ATTRIBUTE(stats);
> +
> +static int depot_debugfs_init(void)
> +{
> +	struct dentry *dir;
> +
> +	if (stack_depot_disabled)
> +		return 0;
> +
> +	dir = debugfs_create_dir("stackdepot", NULL);
> +	debugfs_create_file("stats", 0444, dir, NULL, &stats_fops);
> +	return 0;
> +}
> +late_initcall(depot_debugfs_init);
> --
> 2.43.0.275.g3460e3d667-goog

Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index a0be5d05c7f0..80a8ca24ccc8 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -14,6 +14,7 @@
 
 #define pr_fmt(fmt) "stackdepot: " fmt
 
+#include <linux/debugfs.h>
 #include <linux/gfp.h>
 #include <linux/jhash.h>
 #include <linux/kernel.h>
@@ -115,6 +116,23 @@ static bool new_pool_required = true;
 /* Lock that protects the variables above. */
 static DEFINE_RWLOCK(pool_rwlock);
 
+/* Statistics counters for debugfs. */
+enum depot_counter_id {
+	DEPOT_COUNTER_ALLOCS,
+	DEPOT_COUNTER_FREES,
+	DEPOT_COUNTER_INUSE,
+	DEPOT_COUNTER_FREELIST_SIZE,
+	DEPOT_COUNTER_COUNT,
+};
+static long counters[DEPOT_COUNTER_COUNT];
+static const char *const counter_names[] = {
+	[DEPOT_COUNTER_ALLOCS] = "allocations",
+	[DEPOT_COUNTER_FREES] = "frees",
+	[DEPOT_COUNTER_INUSE] = "in_use",
+	[DEPOT_COUNTER_FREELIST_SIZE] = "freelist_size",
+};
+static_assert(ARRAY_SIZE(counter_names) == DEPOT_COUNTER_COUNT);
+
 static int __init disable_stack_depot(char *str)
 {
 	return kstrtobool(str, &stack_depot_disabled);
@@ -277,6 +295,7 @@ static void depot_init_pool(void *pool)
 		stack->handle.extra = 0;
 
 		list_add(&stack->list, &free_stacks);
+		counters[DEPOT_COUNTER_FREELIST_SIZE]++;
 	}
 
 	/* Save reference to the pool to be used by depot_fetch_stack(). */
@@ -376,6 +395,7 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
 	/* Get and unlink the first entry from the freelist. */
 	stack = list_first_entry(&free_stacks, struct stack_record, list);
 	list_del(&stack->list);
+	counters[DEPOT_COUNTER_FREELIST_SIZE]--;
 
 	/* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
 	if (size > CONFIG_STACKDEPOT_MAX_FRAMES)
@@ -394,6 +414,8 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
 	 */
 	kmsan_unpoison_memory(stack, DEPOT_STACK_RECORD_SIZE);
 
+	counters[DEPOT_COUNTER_ALLOCS]++;
+	counters[DEPOT_COUNTER_INUSE]++;
 	return stack;
 }
 
@@ -426,6 +448,10 @@ static void depot_free_stack(struct stack_record *stack)
 	lockdep_assert_held_write(&pool_rwlock);
 
 	list_add(&stack->list, &free_stacks);
+
+	counters[DEPOT_COUNTER_FREELIST_SIZE]++;
+	counters[DEPOT_COUNTER_FREES]++;
+	counters[DEPOT_COUNTER_INUSE]--;
 }
 
 /* Calculates the hash for a stack. */
@@ -690,3 +716,30 @@ unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
 	return parts.extra;
 }
 EXPORT_SYMBOL(stack_depot_get_extra_bits);
+
+static int stats_show(struct seq_file *seq, void *v)
+{
+	/*
+	 * data race ok: These are just statistics counters, and approximate
+	 * statistics are ok for debugging.
+	 */
+	seq_printf(seq, "pools: %d\n", data_race(pools_num));
+	for (int i = 0; i < DEPOT_COUNTER_COUNT; i++)
+		seq_printf(seq, "%s: %ld\n", counter_names[i], data_race(counters[i]));
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(stats);
+
+static int depot_debugfs_init(void)
+{
+	struct dentry *dir;
+
+	if (stack_depot_disabled)
+		return 0;
+
+	dir = debugfs_create_dir("stackdepot", NULL);
+	debugfs_create_file("stats", 0444, dir, NULL, &stats_fops);
+	return 0;
+}
+late_initcall(depot_debugfs_init);
Add a few basic stats counters for stack depot that can be used to derive if
stack depot is working as intended. This is a snapshot of the new stats after
booting a system with a KASAN-enabled kernel:

$ cat /sys/kernel/debug/stackdepot/stats
pools: 838
allocations: 29861
frees: 6561
in_use: 23300
freelist_size: 1840

Generally, "pools" should be well below the max; once the system is booted,
"in_use" should remain relatively steady.

Signed-off-by: Marco Elver <elver@google.com>
---
 lib/stackdepot.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 53 insertions(+)
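The counter updates in the patch imply a simple consistency relation: depot_alloc_stack() increments both "allocations" and "in_use", while depot_free_stack() increments "frees" and decrements "in_use", so a consistent snapshot should satisfy in_use == allocations - frees (in the snapshot above, 29861 - 6561 = 23300). The sketch below is a hypothetical userspace checker, not part of the patch; the file path and counter names come from the patch, while the program itself and its skew tolerance are assumptions.

```c
/*
 * Hypothetical userspace helper (illustration only, not part of the patch):
 * parse /sys/kernel/debug/stackdepot/stats and verify that the counters are
 * mutually consistent, i.e. in_use == allocations - frees, as implied by how
 * depot_alloc_stack() and depot_free_stack() update them.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/stackdepot/stats", "r");
	char name[64];
	long val, allocations = 0, frees = 0, in_use = 0;

	if (!f) {
		/* Needs debugfs mounted and a kernel with this patch applied. */
		perror("stackdepot stats");
		return EXIT_FAILURE;
	}

	/* Each line has the form "<name>: <value>". */
	while (fscanf(f, " %63[^:]: %ld", name, &val) == 2) {
		if (!strcmp(name, "allocations"))
			allocations = val;
		else if (!strcmp(name, "frees"))
			frees = val;
		else if (!strcmp(name, "in_use"))
			in_use = val;
	}
	fclose(f);

	printf("allocations=%ld frees=%ld in_use=%ld\n", allocations, frees, in_use);

	/* The counters are read without synchronization, so allow a little skew. */
	if (labs(in_use - (allocations - frees)) > 16) {
		fprintf(stderr, "in_use does not match allocations - frees\n");
		return EXIT_FAILURE;
	}
	return EXIT_SUCCESS;
}
```

Run as root on a booted system; with the example snapshot above the check passes, and a steadily growing gap between allocations and frees over time would be the kind of anomaly the commit message suggests watching for.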