| Message ID | 20220901044249.4624-4-osalvador@suse.de (mailing list archive) |
|---|---|
| State | New |
| Series | page_owner: print stacks and their counter |
On 9/1/22 11:42 AM, Oscar Salvador wrote:
> +static ssize_t read_page_owner_threshold(struct file *file, char __user *buf,
> +					 size_t count, loff_t *pos)
> +{
> +	char *kbuf;
> +	int ret = 0;
> +
> +	count = min_t(size_t, count, PAGE_SIZE);
> +
> +	if (*pos >= count)
> +		return 0;
> +
> +	kbuf = kmalloc(count, GFP_KERNEL);
> +	if (!kbuf)
> +		return ENOMEM;

Missing a negative sign, return -ENOMEM.

> +	ret = scnprintf(kbuf, count, "%lu\n", threshold_count);
> +	if (copy_to_user(buf, kbuf, ret))
> +		ret = -EFAULT;
> +
> +	*pos += count;
> +	kfree(kbuf);
> +
> +	return ret;
> +}
> +
> +static ssize_t write_page_owner_threshold(struct file *file, const char __user *buf,
> +					  size_t count, loff_t *pos)
> +{
> +	char *kbuf;
> +	int ret = 0;
> +
> +	count = min_t(size_t, count, PAGE_SIZE);
> +	kbuf = kmalloc(count, GFP_KERNEL);

This looks like an overestimate to me. An unsigned long on a 64-bit system
has a max value of 18446744073709551615 (20 chars).

You can use a stack-allocated local variable of length 21. No need to use
kmalloc(). The same goes for the read() op.

> +	if (!kbuf)
> +		return -ENOMEM;
> +
> +	if (copy_from_user(kbuf, buf, count)) {
> +		ret = -EFAULT;
> +		goto out;
> +	}
> +
> +	kbuf[count - 1] = '\0';
> +
> +	ret = kstrtoul(kbuf, 10, &threshold_count);
> +
> +out:
> +	kfree(kbuf);
> +	return ret ? ret : count;
> +}
> +
> +static const struct file_operations proc_page_owner_threshold = {
> +	.read = read_page_owner_threshold,
> +	.write = write_page_owner_threshold,
> +};
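[For illustration, a minimal sketch of the stack-buffer variant suggested above, reusing the patch's threshold_count global; the buffer size (22 bytes: 20 digits for ULONG_MAX, an optional newline, and the NUL) and the error handling are only indicative, not the author's implementation.]

static ssize_t write_page_owner_threshold(struct file *file, const char __user *buf,
					  size_t count, loff_t *pos)
{
	/* 20 digits for ULONG_MAX, plus an optional '\n' and the NUL. */
	char kbuf[22];
	int ret;

	if (count == 0 || count > sizeof(kbuf) - 1)
		return -EINVAL;

	if (copy_from_user(kbuf, buf, count))
		return -EFAULT;

	/* NUL-terminate; kstrtoul() tolerates a single trailing newline. */
	kbuf[count] = '\0';

	ret = kstrtoul(kbuf, 10, &threshold_count);
	return ret ? ret : count;
}

[kstrtoul_from_user() would be another option on the write side, parsing straight from the user buffer and avoiding the temporary copy altogether.]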
On Thu 01-09-22 06:42:49, Oscar Salvador wrote:
[...]
> +static ssize_t read_page_owner_threshold(struct file *file, char __user *buf,
> +					 size_t count, loff_t *pos)
> +{
> +	char *kbuf;
> +	int ret = 0;
> +
> +	count = min_t(size_t, count, PAGE_SIZE);
> +
> +	if (*pos >= count)
> +		return 0;
> +
> +	kbuf = kmalloc(count, GFP_KERNEL);

No, you do not want to trigger a user-defined allocation like that. I would
use seq_file.
On Thu, Sep 01, 2022 at 03:31:51PM +0700, Ammar Faizi wrote:
> On 9/1/22 11:42 AM, Oscar Salvador wrote:
> > +static ssize_t read_page_owner_threshold(struct file *file, char __user *buf,
> > +	kbuf = kmalloc(count, GFP_KERNEL);
> > +	if (!kbuf)
> > +		return ENOMEM;
>
> Missing a negative sign, return -ENOMEM.

Will fix.

> > +static ssize_t write_page_owner_threshold(struct file *file, const char __user *buf,
> > +					  size_t count, loff_t *pos)
> > +{
> > +	char *kbuf;
> > +	int ret = 0;
> > +
> > +	count = min_t(size_t, count, PAGE_SIZE);
> > +	kbuf = kmalloc(count, GFP_KERNEL);
>
> This looks like an overestimate to me. An unsigned long on a 64-bit system
> has a max value of 18446744073709551615 (20 chars).
>
> You can use a stack-allocated local variable of length 21. No need to use
> kmalloc(). The same goes for the read() op.

Probably could do that, but I'll go with Michal's option and will use
seq_file.

Thanks!
On Thu, Sep 01, 2022 at 10:40:32AM +0200, Michal Hocko wrote:
> On Thu 01-09-22 06:42:49, Oscar Salvador wrote:
> [...]
> > +static ssize_t read_page_owner_threshold(struct file *file, char __user *buf,
> > +					 size_t count, loff_t *pos)
> > +{
> > +	char *kbuf;
> > +	int ret = 0;
> > +
> > +	count = min_t(size_t, count, PAGE_SIZE);
> > +
> > +	if (*pos >= count)
> > +		return 0;
> > +
> > +	kbuf = kmalloc(count, GFP_KERNEL);
>
> No, you do not want to trigger a user-defined allocation like that. I
> would use seq_file.

Sure, will use that.

Thanks!
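[For reference, a minimal sketch of what the seq_file route could look like for the read side, reusing the patch's threshold_count and its write handler; the helper names page_owner_threshold_show/_open are illustrative and assume <linux/seq_file.h> is already included, as mm/page_owner.c does.]

static int page_owner_threshold_show(struct seq_file *m, void *v)
{
	/* Print the current threshold; seq_file handles buffering and *pos. */
	seq_printf(m, "%lu\n", threshold_count);
	return 0;
}

static int page_owner_threshold_open(struct inode *inode, struct file *file)
{
	return single_open(file, page_owner_threshold_show, NULL);
}

static const struct file_operations proc_page_owner_threshold = {
	.open		= page_owner_threshold_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= write_page_owner_threshold,
};

[For a single unsigned long exposed through debugfs, debugfs_create_ulong() would avoid hand-rolled file operations entirely, at the cost of any input validation.]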
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index 20f62039f23a..ee66be40a152 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -26,7 +26,8 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 					stack_action_t action);
 void stack_depot_dec_count(depot_stack_handle_t handle);
 int stack_depot_print_stacks_threshold(char *buf, size_t size, loff_t *pos,
-				       unsigned long *last_stack);
+				       unsigned long *last_stack,
+				       unsigned long threshold);
 
 /*
  * Every user of stack depot has to call stack_depot_init() during its own init
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 3090ae0f3958..b4a04f09a7b7 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -528,7 +528,8 @@ depot_stack_handle_t stack_depot_save_action(unsigned long *entries,
 EXPORT_SYMBOL_GPL(stack_depot_save_action);
 
 int stack_depot_print_stacks_threshold(char *buf, size_t size, loff_t *pos,
-				       unsigned long *last_stack)
+				       unsigned long *last_stack,
+				       unsigned long threshold)
 {
 	struct stack_record *stack = NULL, *last;
 	struct stack_record **stacks;
@@ -547,7 +548,8 @@ int stack_depot_print_stacks_threshold(char *buf, size_t size, loff_t *pos,
 	for (; stack; stack = stack->next) {
 		if (!stack->size || stack->size < 0 ||
 		    stack->size > size || stack->handle.valid != 1 ||
-		    refcount_read(&stack->count) < 1)
+		    refcount_read(&stack->count) < 1 ||
+		    refcount_read(&stack->count) < threshold)
 			continue;
 
 		ret += stack_trace_snprint(buf, size, stack->entries, stack->size, 0);
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 8c67c7eb2451..ef10cf44aaec 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -44,6 +44,7 @@ static depot_stack_handle_t early_handle;
 static void init_early_allocated_pages(void);
 
 static unsigned long last_stack = 0;
+static unsigned long threshold_count = 0;
 
 static int __init early_page_owner_param(char *buf)
 {
@@ -676,7 +677,8 @@ static ssize_t read_page_owner_stacks(struct file *file, char __user *buf,
 	if (!kbuf)
 		return ENOMEM;
 
-	ret += stack_depot_print_stacks_threshold(kbuf, count, pos, &last_stack);
+	ret += stack_depot_print_stacks_threshold(kbuf, count, pos, &last_stack,
+						  threshold_count);
 	if (copy_to_user(buf, kbuf, ret))
 		ret = -EFAULT;
 
@@ -687,6 +689,61 @@ static ssize_t read_page_owner_stacks(struct file *file, char __user *buf,
 	return ret;
 }
 
+static ssize_t read_page_owner_threshold(struct file *file, char __user *buf,
+					 size_t count, loff_t *pos)
+{
+	char *kbuf;
+	int ret = 0;
+
+	count = min_t(size_t, count, PAGE_SIZE);
+
+	if (*pos >= count)
+		return 0;
+
+	kbuf = kmalloc(count, GFP_KERNEL);
+	if (!kbuf)
+		return ENOMEM;
+
+	ret = scnprintf(kbuf, count, "%lu\n", threshold_count);
+	if (copy_to_user(buf, kbuf, ret))
+		ret = -EFAULT;
+
+	*pos += count;
+	kfree(kbuf);
+
+	return ret;
+}
+
+static ssize_t write_page_owner_threshold(struct file *file, const char __user *buf,
+					  size_t count, loff_t *pos)
+{
+	char *kbuf;
+	int ret = 0;
+
+	count = min_t(size_t, count, PAGE_SIZE);
+	kbuf = kmalloc(count, GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+
+	if (copy_from_user(kbuf, buf, count)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	kbuf[count - 1] = '\0';
+
+	ret = kstrtoul(kbuf, 10, &threshold_count);
+
+out:
+	kfree(kbuf);
+	return ret ? ret : count;
+}
+
+static const struct file_operations proc_page_owner_threshold = {
+	.read = read_page_owner_threshold,
+	.write = write_page_owner_threshold,
+};
+
 static const struct file_operations proc_page_owner_stacks = {
 	.read = read_page_owner_stacks,
 };
@@ -706,6 +763,8 @@ static int __init pageowner_init(void)
 			    &proc_page_owner_operations);
 	debugfs_create_file("page_owner_stacks", 0400, NULL, NULL,
 			    &proc_page_owner_stacks);
+	debugfs_create_file("page_owner_threshold", 0600, NULL, NULL,
+			    &proc_page_owner_threshold);
 
 	return 0;
 }
We want to be able to filter the output on a threshold basis; this way we can
get rid of a lot of noise and focus only on those stacks which have a
suspiciously high counter. The threshold value is controlled through a new
file called 'page_owner_threshold', which is 0 by default.

Signed-off-by: Oscar Salvador <osalvador@suse.de>
---
 include/linux/stackdepot.h |  3 +-
 lib/stackdepot.c           |  6 ++--
 mm/page_owner.c            | 61 +++++++++++++++++++++++++++++++++++++-
 3 files changed, 66 insertions(+), 4 deletions(-)
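[As a usage illustration of the interface above, assuming debugfs is mounted at /sys/kernel/debug: writing a value such as 1000 to /sys/kernel/debug/page_owner_threshold and then reading /sys/kernel/debug/page_owner_stacks would dump only those stacks whose allocation counter is at least 1000, per the refcount_read() check added in stack_depot_print_stacks_threshold().]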