diff mbox series

[v7,3/4] mm,page_owner: Display all stacks and their count

Message ID 20240208234539.19113-4-osalvador@suse.de (mailing list archive)
State New
Headers show
Series page_owner: print stacks and their outstanding allocations | expand

Commit Message

Oscar Salvador Feb. 8, 2024, 11:45 p.m. UTC
This patch adds a new file called 'page_owner_stacks', which
shows all stacks that were added by page_owner, each followed by
its count, giving us a clear overview of the stack <-> count
relationship.

E.g.:

  prep_new_page+0xa9/0x120
  get_page_from_freelist+0x801/0x2210
  __alloc_pages+0x18b/0x350
  alloc_pages_mpol+0x91/0x1f0
  folio_alloc+0x14/0x50
  filemap_alloc_folio+0xb2/0x100
  __filemap_get_folio+0x14a/0x490
  ext4_write_begin+0xbd/0x4b0 [ext4]
  generic_perform_write+0xc1/0x1e0
  ext4_buffered_write_iter+0x68/0xe0 [ext4]
  ext4_file_write_iter+0x70/0x740 [ext4]
  vfs_write+0x33d/0x420
  ksys_write+0xa5/0xe0
  do_syscall_64+0x80/0x160
  entry_SYSCALL_64_after_hwframe+0x6e/0x76
 stack_count: 4578

In order to show all the stacks, we implement stack_depot_get_next_stack(),
which walks all buckets and retrieves the stacks stored in them.
stack_depot_get_next_stack() returns the stacks one at a time:
it first finds a non-empty bucket, and then returns, one by one, the
stacks stored in that bucket.
Once that bucket has been exhausted, it moves on to the next non-empty
bucket and repeats the same steps until all buckets have been checked.
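
The file is exposed via debugfs, so reading it is as simple as
(assuming debugfs is mounted at /sys/kernel/debug):

  cat /sys/kernel/debug/page_owner_stacks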

Signed-off-by: Oscar Salvador <osalvador@suse.de>
---
 include/linux/stackdepot.h | 20 +++++++++
 lib/stackdepot.c           | 46 +++++++++++++++++++++
 mm/page_owner.c            | 85 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 151 insertions(+)

Comments

Marco Elver Feb. 9, 2024, 8 a.m. UTC | #1
On Fri, 9 Feb 2024 at 00:45, Oscar Salvador <osalvador@suse.de> wrote:
>
> This patch adds a new file called 'page_owner_stacks', which
> shows all stacks that were added by page_owner, each followed by
> its count, giving us a clear overview of the stack <-> count
> relationship.
>
> E.g.:
>
>   prep_new_page+0xa9/0x120
>   get_page_from_freelist+0x801/0x2210
>   __alloc_pages+0x18b/0x350
>   alloc_pages_mpol+0x91/0x1f0
>   folio_alloc+0x14/0x50
>   filemap_alloc_folio+0xb2/0x100
>   __filemap_get_folio+0x14a/0x490
>   ext4_write_begin+0xbd/0x4b0 [ext4]
>   generic_perform_write+0xc1/0x1e0
>   ext4_buffered_write_iter+0x68/0xe0 [ext4]
>   ext4_file_write_iter+0x70/0x740 [ext4]
>   vfs_write+0x33d/0x420
>   ksys_write+0xa5/0xe0
>   do_syscall_64+0x80/0x160
>   entry_SYSCALL_64_after_hwframe+0x6e/0x76
>  stack_count: 4578
>
> In order to show all the stacks, we implement stack_depot_get_next_stack(),
> which walks all buckets and retrieves the stacks stored in them.
> stack_depot_get_next_stack() returns the stacks one at a time:
> it first finds a non-empty bucket, and then returns, one by one, the
> stacks stored in that bucket.
> Once that bucket has been exhausted, it moves on to the next non-empty
> bucket and repeats the same steps until all buckets have been checked.
>
> Signed-off-by: Oscar Salvador <osalvador@suse.de>
> ---
>  include/linux/stackdepot.h | 20 +++++++++
>  lib/stackdepot.c           | 46 +++++++++++++++++++++
>  mm/page_owner.c            | 85 ++++++++++++++++++++++++++++++++++++++
>  3 files changed, 151 insertions(+)
>
> diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
> index ac62de4d4999..d851ec821e6f 100644
> --- a/include/linux/stackdepot.h
> +++ b/include/linux/stackdepot.h
> @@ -183,6 +183,26 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
>   */
>  struct stack_record *stack_depot_get_stack(depot_stack_handle_t handle);
>
> +/**
> + * stack_depot_get_next_stack - Returns all stacks, one at a time

"Returns all stack_records" to be clear that this is returning the struct.

> + *
> + * @table:     Current table we are checking
> + * @bucket:    Current bucket we are checking
> + * @last_found:        Last stack that was found
> + *
> + * This function first finds a non-empty bucket and returns the first stack
> + * stored in it. On subsequent calls, it walks the bucket to see whether
> + * it contains more stacks.
> + * Once we have walked all the stacks in a bucket, we check
> + * the next one, and we repeat the same steps until we have checked all of them.

I think for this function it's important to say that no entry returned
from this function can be evicted.

I.e. the easiest way to ensure this is that the caller makes sure the
entries returned are never passed to stack_depot_put() - which is
certainly the case for your usecase because you do not use
stack_depot_put().
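
For example, the kernel-doc could gain a note along these lines (just a
suggested wording, not final text):

 * Note: The stack_records returned by this function must never be
 * passed to stack_depot_put(): the walk holds no reference on the
 * entries, so the caller has to guarantee that none of the returned
 * stack_records can be evicted while the iteration, or any later use
 * of the returned pointers, is in progress.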

> + * Return: A pointer to a stack_record struct, or NULL when we have walked all
> + * buckets.
> + */
> +struct stack_record *stack_depot_get_next_stack(unsigned long *table,

To keep things consistent, I'd also call this
__stack_depot_get_next_stack_record(), so that it's clear this is
more of an internal function, not meant for general usage.

> +                                               struct list_head **bucket,
> +                                               struct stack_record **last_found);
> +
>  /**
>   * stack_depot_fetch - Fetch a stack trace from stack depot
>   *
> diff --git a/lib/stackdepot.c b/lib/stackdepot.c
> index 197c355601f9..107bd0174cd6 100644
> --- a/lib/stackdepot.c
> +++ b/lib/stackdepot.c
> @@ -782,6 +782,52 @@ unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
>  }
>  EXPORT_SYMBOL(stack_depot_get_extra_bits);
>
> +struct stack_record *stack_depot_get_next_stack(unsigned long *table,
> +                                               struct list_head **curr_bucket,
> +                                               struct stack_record **last_found)
> +{
> +       struct list_head *bucket = *curr_bucket;
> +       unsigned long nr_table = *table;
> +       struct stack_record *found = NULL;
> +       unsigned long stack_table_entries = stack_hash_mask + 1;
> +
> +       rcu_read_lock_sched_notrace();

We are returning pointers to stack_records out of the RCU-read
critical section, which are then later used to continue the iteration.
list_for_each_entry_continue_rcu() says this is fine if "... you held
some sort of non-RCU reference (such as a reference count) ...".
Updating the function's documentation to say none of these entries can
be evicted via a stack_depot_put() is required.

> +       if (!bucket) {
> +               /*
> +                * Find a non-empty bucket. Once we have found it,
> +                * we will use list_for_each_entry_continue_rcu() on the next
> +                * call to keep walking the bucket.
> +                */
> +new_table:
> +               bucket = &stack_table[nr_table];
> +               list_for_each_entry_rcu(found, bucket, hash_list) {
> +                       goto out;
> +               }
> +       } else {
> +                /* Check whether we have more stacks in this bucket */
> +               found = *last_found;
> +               list_for_each_entry_continue_rcu(found, bucket, hash_list) {
> +                       goto out;
> +               }
> +       }
> +
> +       /* No more stacks in this bucket, check the next one */
> +       nr_table++;
> +       if (nr_table < stack_table_entries)
> +               goto new_table;
> +
> +       /* We are done walking all buckets */
> +       found = NULL;
> +
> +out:
> +       *table = nr_table;
> +       *curr_bucket = bucket;
> +       *last_found = found;
> +       rcu_read_unlock_sched_notrace();
> +
> +       return found;
> +}
> +
>  static int stats_show(struct seq_file *seq, void *v)
>  {
>         /*
> diff --git a/mm/page_owner.c b/mm/page_owner.c
> index 0adf41702b9d..aea212734557 100644
> --- a/mm/page_owner.c
> +++ b/mm/page_owner.c
> @@ -749,6 +749,89 @@ static const struct file_operations proc_page_owner_operations = {
>         .llseek         = lseek_page_owner,
>  };
>
> +struct stack_iterator {
> +       unsigned long nr_table;
> +       struct list_head *bucket;
> +       struct stack_record *last_stack;
> +};
> +
> +static void *stack_start(struct seq_file *m, loff_t *ppos)
> +{
> +       struct stack_iterator *iter = m->private;
> +
> +       if (*ppos == -1UL)
> +               return NULL;
> +
> +       return stack_depot_get_next_stack(&iter->nr_table,
> +                                         &iter->bucket,
> +                                         &iter->last_stack);
> +}
> +
> +static void *stack_next(struct seq_file *m, void *v, loff_t *ppos)
> +{
> +       struct stack_iterator *iter = m->private;
> +       struct stack_record *stack;
> +
> +       stack = stack_depot_get_next_stack(&iter->nr_table,
> +                                          &iter->bucket,
> +                                          &iter->last_stack);
> +       *ppos = stack ? *ppos + 1 : -1UL;
> +
> +       return stack;
> +}
> +
> +static int stack_print(struct seq_file *m, void *v)
> +{
> +       char *buf;
> +       int ret = 0;
> +       struct stack_iterator *iter = m->private;
> +       struct stack_record *stack = iter->last_stack;
> +
> +       if (!stack->size || stack->size < 0 || refcount_read(&stack->count) < 2)
> +               return 0;
> +
> +       buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
> +
> +       ret += stack_trace_snprint(buf, PAGE_SIZE, stack->entries, stack->size,
> +                                  0);
> +       if (!ret)
> +               goto out;
> +
> +       scnprintf(buf + ret, PAGE_SIZE - ret, "stack_count: %d\n\n",
> +                 refcount_read(&stack->count));
> +
> +       seq_puts(m, buf);
> +       seq_puts(m, "\n\n");
> +out:
> +       kfree(buf);
> +
> +       return 0;
> +}
> +
> +static void stack_stop(struct seq_file *m, void *v)
> +{
> +}
> +
> +static const struct seq_operations page_owner_stack_op = {
> +       .start  = stack_start,
> +       .next   = stack_next,
> +       .stop   = stack_stop,
> +       .show   = stack_print
> +};
> +
> +static int page_owner_stack_open(struct inode *inode, struct file *file)
> +{
> +       return seq_open_private(file, &page_owner_stack_op,
> +                               sizeof(struct stack_iterator));
> +}
> +
> +const struct file_operations page_owner_stack_operations = {
> +       .open           = page_owner_stack_open,
> +       .read           = seq_read,
> +       .llseek         = seq_lseek,
> +       .release        = seq_release,
> +};
> +
>  static int __init pageowner_init(void)
>  {
>         if (!static_branch_unlikely(&page_owner_inited)) {
> @@ -758,6 +841,8 @@ static int __init pageowner_init(void)
>
>         debugfs_create_file("page_owner", 0400, NULL, NULL,
>                             &proc_page_owner_operations);
> +       debugfs_create_file("page_owner_stacks", 0400, NULL, NULL,
> +                           &page_owner_stack_operations);
>
>         return 0;
>  }
> --
> 2.43.0
>
Oscar Salvador Feb. 9, 2024, 9:52 p.m. UTC | #2
On Fri, Feb 09, 2024 at 09:00:40AM +0100, Marco Elver wrote:
> > +/**
> > + * stack_depot_get_next_stack - Returns all stacks, one at a time
> 
> "Returns all stack_records" to be clear that this is returning the struct.

Fixed.


> 
> > + *
> > + * @table:     Current table we are checking
> > + * @bucket:    Current bucket we are checking
> > + * @last_found:        Last stack that was found
> > + *
> > + * This function first finds a non-empty bucket and returns the first stack
> > + * stored in it. On subsequent calls, it walks the bucket to see whether
> > + * it contains more stacks.
> > + * Once we have walked all the stacks in a bucket, we check
> > + * the next one, and we repeat the same steps until we have checked all of them.
> 
> I think for this function it's important to say that no entry returned
> from this function can be evicted.
> 
> I.e. the easiest way to ensure this is that the caller makes sure the
> entries returned are never passed to stack_depot_put() - which is
> certainly the case for your usecase because you do not use
> stack_depot_put().
> 
> > + * Return: A pointer to a stack_record struct, or NULL when we have walked all
> > + * buckets.
> > + */
> > +struct stack_record *stack_depot_get_next_stack(unsigned long *table,
> 
> To keep things consistent, I'd also call this
> __stack_depot_get_next_stack_record(), so that it's clear this is
> more of an internal function, not meant for general usage.
> 
> > +                                               struct list_head **bucket,
> > +                                               struct stack_record **last_found);
> > +
> >  /**
> >   * stack_depot_fetch - Fetch a stack trace from stack depot
> >   *
> > diff --git a/lib/stackdepot.c b/lib/stackdepot.c
> > index 197c355601f9..107bd0174cd6 100644
> > --- a/lib/stackdepot.c
> > +++ b/lib/stackdepot.c
> > @@ -782,6 +782,52 @@ unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
> >  }
> >  EXPORT_SYMBOL(stack_depot_get_extra_bits);
> >
> > +struct stack_record *stack_depot_get_next_stack(unsigned long *table,
> > +                                               struct list_head **curr_bucket,
> > +                                               struct stack_record **last_found)
> > +{
> > +       struct list_head *bucket = *curr_bucket;
> > +       unsigned long nr_table = *table;
> > +       struct stack_record *found = NULL;
> > +       unsigned long stack_table_entries = stack_hash_mask + 1;
> > +
> > +       rcu_read_lock_sched_notrace();
> 
> We are returning pointers to stack_records out of the RCU-read
> critical section, which are then later used to continue the iteration.
> list_for_each_entry_continue_rcu() says this is fine if "... you held
> some sort of non-RCU reference (such as a reference count) ...".
> Updating the function's documentation to say none of these entries can
> be evicted via a stack_depot_put() is required.

Thinking about it some more, I think I made a mistake:

I am walking all buckets, and within those buckets there are not only
page_owner stack_records, which means that I could return a stack_record
from e.g. KASAN (which I think can evict stack_records) and then
everything goes off the rails.
Which means I cannot walk the buckets like that.

Actually, I think that having something like the following

 struct list_stack_records {
      struct stack_record *stack;
      struct list_stack_records *next;
 };

in page_owner would make sense.
Then the only thing I would have to do is to add a new entry for every
new stack_record, and then I could just walk that list.

Which means that the function stack_depot_get_next_stack() could be
killed because everything would happen in page_owner code.

e.g:

 static void inc_stack_record_count(depot_stack_handle_t handle)
 {
         struct stack_record *stack = __stack_depot_get_stack_record(handle);

         if (stack) {
                 /*
                  * New stack_records that do not use STACK_DEPOT_FLAG_GET start
                  * with REFCOUNT_SATURATED to catch spurious increments of their
                  * refcount.
                  * Since we do not use the STACK_DEPOT_FLAG_{GET,PUT} API, let us
                  * set a refcount of 1 ourselves.
                  */
                 if (refcount_read(&stack->count) == REFCOUNT_SATURATED) {
                         refcount_set(&stack->count, 1);
                         add_new_stack_record_into_the_list(stack);
                 }
                 refcount_inc(&stack->count);
         }
 }

and then just walk the list_stack_records list whenever we want to
show the stack traces and their counts.

I think that overall this approach is cleaner and safer.
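
To make the idea more concrete, here is a minimal sketch of the missing
helper, assuming a spinlock-protected singly-linked list private to
page_owner (all names are placeholders, not final code):

 static struct list_stack_records *stack_list;
 static DEFINE_SPINLOCK(stack_list_lock);

 static void add_new_stack_record_into_the_list(struct stack_record *stack)
 {
         struct list_stack_records *new;
         unsigned long flags;

         /*
          * This can be reached from the page allocation path, so use a
          * non-sleeping allocation.
          */
         new = kmalloc(sizeof(*new), GFP_ATOMIC);
         if (!new)
                 return;

         new->stack = stack;

         spin_lock_irqsave(&stack_list_lock, flags);
         new->next = stack_list;
         stack_list = new;
         spin_unlock_irqrestore(&stack_list_lock, flags);
 }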
Oscar Salvador Feb. 9, 2024, 11:14 p.m. UTC | #3
On Fri, Feb 09, 2024 at 10:52:48PM +0100, Oscar Salvador wrote:
> Thinking about it some more, I think I made a mistake:
> 
> I am walking all buckets, and within those buckets there are not only
> page_owner stack_records, which means that I could return a stack_record
> from e.g. KASAN (which I think can evict stack_records) and then
> everything goes off the rails.
> Which means I cannot walk the buckets like that.
> 
> Actually, I think that having something like the following
> 
>  struct list_stack_records {
>       struct stack_record *stack;
>       struct list_stack_records *next;
>  };

Or, I could use the extra_bits field from handle_parts to flag when
a depot_stack_handle_t is used by page_owner.

Then __stack_depot_get_next_stack_record() would check whether
a stack_record->handle.extra_bits has the page_owner bit, and only
return those stacks that have that bit set.
This would solve the problem of returning a potentially evictable
stack by only returning page_owner's stack_records, and I would not
have to maintain my own list.
 
I still have to see what that would look like exactly, but it sounds
promising.
Do you think that is feasible, Marco?
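
Very roughly, something like this (PAGE_OWNER_EXTRA_BIT is a made-up
name, and the last part would need new stackdepot code):

 #define PAGE_OWNER_EXTRA_BIT	0x1

 /* page_owner side: tag every handle we save as ours */
 handle = stack_depot_save(entries, nr_entries, gfp);
 handle = stack_depot_set_extra_bits(handle, PAGE_OWNER_EXTRA_BIT);

 /*
  * The walker would then skip stack_records whose extra bits do not
  * carry PAGE_OWNER_EXTRA_BIT. Note that today the extra bits live
  * only in the handle returned to the caller, not in the stack_record
  * itself, so stackdepot would have to start storing them in the
  * record as well.
  */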

Thanks
kernel test robot Feb. 9, 2024, 11:14 p.m. UTC | #4
Hi Oscar,

kernel test robot noticed the following build warnings:

[auto build test WARNING on akpm-mm/mm-nonmm-unstable]
[also build test WARNING on linus/master v6.8-rc3]
[cannot apply to akpm-mm/mm-everything next-20240209]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Oscar-Salvador/lib-stackdepot-Move-stack_record-struct-definition-into-the-header/20240209-074611
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-nonmm-unstable
patch link:    https://lore.kernel.org/r/20240208234539.19113-4-osalvador%40suse.de
patch subject: [PATCH v7 3/4] mm,page_owner: Display all stacks and their count
config: x86_64-randconfig-121-20240209 (https://download.01.org/0day-ci/archive/20240210/202402100636.5lFPAlbB-lkp@intel.com/config)
compiler: clang version 17.0.6 (https://github.com/llvm/llvm-project 6009708b4367171ccdbf4b5905cb6a803753fe18)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240210/202402100636.5lFPAlbB-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202402100636.5lFPAlbB-lkp@intel.com/

sparse warnings: (new ones prefixed by >>)
>> mm/page_owner.c:828:30: sparse: sparse: symbol 'page_owner_stack_operations' was not declared. Should it be static?
   mm/page_owner.c: note: in included file (through include/linux/mmzone.h, include/linux/gfp.h, include/linux/xarray.h, ...):
   include/linux/page-flags.h:242:46: sparse: sparse: self-comparison always evaluates to false

vim +/page_owner_stack_operations +828 mm/page_owner.c

   827	
 > 828	const struct file_operations page_owner_stack_operations = {
   829		.open		= page_owner_stack_open,
   830		.read		= seq_read,
   831		.llseek		= seq_lseek,
   832		.release	= seq_release,
   833	};
   834
Marco Elver Feb. 10, 2024, 7:52 a.m. UTC | #5
On Sat, 10 Feb 2024 at 00:13, Oscar Salvador <osalvador@suse.de> wrote:
>
> On Fri, Feb 09, 2024 at 10:52:48PM +0100, Oscar Salvador wrote:
> > Thinking about it some more, I think I made a mistake:
> >
> > I am walking all buckets, and within those buckets there are not only
> > page_owner stack_records, which means that I could return a stack_record
> > from e.g. KASAN (which I think can evict stack_records) and then
> > everything goes off the rails.
> > Which means I cannot walk the buckets like that.
> >
> > Actually, I think that having something like the following
> >
> >  struct list_stack_records {
> >       struct stack_record *stack;
> >       struct list_stack_records *next;
> >  };
>
> Or, I could use the extra_bits field from handle_parts to flag when
> a depot_stack_handle_t is used by page_owner.
>
> Then __stack_depot_get_next_stack_record() would check whether
> a stack_record->handle.extra_bits has the page_owner bit, and only
> return those stacks that have that bit set.
> This would solve the problem of returning a potentially evictable
> stack by only returning page_owner's stack_records, and I would not
> have to maintain my own list.
>
> I still have to see what that would look like exactly, but it sounds
> promising.
> Do you think that is feasible, Marco?

The extra bits are used by KMSAN, and might conflict if enabled at the
same time. I think the safest option is to keep your own list. I think
that will also be more performant if there are other stackdepot users
because you do not have to traverse any of the other entries.
Oscar Salvador Feb. 11, 2024, 8:39 p.m. UTC | #6
On Sat, Feb 10, 2024 at 08:52:25AM +0100, Marco Elver wrote:
> The extra bits are used by KMSAN, and might conflict if enabled at the
> same time. I think the safest option is to keep your own list. I think
> that will also be more performant if there are other stackdepot users
> because you do not have to traverse any of the other entries.

Ok, I thought we had spare bits for other users.
But thinking about it some more, yes, it makes sense for page_owner to
maintain its own list: traversing it is faster, and we do not have to
add bucket-walking code to stackdepot.
Vlastimil Babka Feb. 12, 2024, 10:47 a.m. UTC | #7
On 2/11/24 21:39, Oscar Salvador wrote:
> On Sat, Feb 10, 2024 at 08:52:25AM +0100, Marco Elver wrote:
>> The extra bits are used by KMSAN, and might conflict if enabled at the
>> same time. I think the safest option is to keep your own list. I think
>> that will also be more performant if there are other stackdepot users
>> because you do not have to traverse any of the other entries.
> 
> Ok, I thought we had spare bits for other users.
> But thinking about it some more, yes, it makes sense for page_owner to
> maintain its own list, so traversing it is faster and we do not have
> to place code to traverse the buckets in stackdepot.

Would it make sense to introduce per-user stack depot instances? ("user"
being a subsystem, e.g. kasan or page_owner.) I'd expect each to have a
distinct set of stacks, so there are no benefits to using the same hash
table, only the downside of longer collision lists.

I can imagine this would be easier for users that don't need the
early-init kind of stackdepot, but maybe even there it could be feasible
to have a small fixed-size array of hash table roots where every user
gets a separate index?
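
Something like this, perhaps (every name below is hypothetical, just to
illustrate the shape such an interface could take):

 /* Hypothetical per-user depot instances */
 enum stack_depot_user {
         STACK_DEPOT_USER_KASAN,
         STACK_DEPOT_USER_PAGE_OWNER,
         NR_STACK_DEPOT_USERS,
 };

 /* One set of hash table roots per user instead of a single shared one */
 static struct list_head *stack_tables[NR_STACK_DEPOT_USERS];

 depot_stack_handle_t stack_depot_save_user(unsigned long *entries,
                                            unsigned int nr_entries,
                                            gfp_t alloc_flags,
                                            enum stack_depot_user user);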
diff mbox series

Patch

diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index ac62de4d4999..d851ec821e6f 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -183,6 +183,26 @@  depot_stack_handle_t stack_depot_save(unsigned long *entries,
  */
 struct stack_record *stack_depot_get_stack(depot_stack_handle_t handle);
 
+/**
+ * stack_depot_get_next_stack - Returns all stacks, one at a time
+ *
+ * @table:	Current table we are checking
+ * @bucket:	Current bucket we are checking
+ * @last_found:	Last stack that was found
+ *
+ * This function first finds a non-empty bucket and returns the first stack
+ * stored in it. On subsequent calls, it walks the bucket to see whether
+ * it contains more stacks.
+ * Once we have walked all the stacks in a bucket, we check
+ * the next one, and we repeat the same steps until we have checked all of them.
+ *
+ * Return: A pointer to a stack_record struct, or NULL when we have walked all
+ * buckets.
+ */
+struct stack_record *stack_depot_get_next_stack(unsigned long *table,
+						struct list_head **bucket,
+						struct stack_record **last_found);
+
 /**
  * stack_depot_fetch - Fetch a stack trace from stack depot
  *
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 197c355601f9..107bd0174cd6 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -782,6 +782,52 @@  unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
 }
 EXPORT_SYMBOL(stack_depot_get_extra_bits);
 
+struct stack_record *stack_depot_get_next_stack(unsigned long *table,
+						struct list_head **curr_bucket,
+						struct stack_record **last_found)
+{
+	struct list_head *bucket = *curr_bucket;
+	unsigned long nr_table = *table;
+	struct stack_record *found = NULL;
+	unsigned long stack_table_entries = stack_hash_mask + 1;
+
+	rcu_read_lock_sched_notrace();
+	if (!bucket) {
+		/*
+		 * Find a non-empty bucket. Once we have found it,
+		 * we will use list_for_each_entry_continue_rcu() on the next
+		 * call to keep walking the bucket.
+		 */
+new_table:
+		bucket = &stack_table[nr_table];
+		list_for_each_entry_rcu(found, bucket, hash_list) {
+			goto out;
+		}
+	} else {
+		 /* Check whether we have more stacks in this bucket */
+		found = *last_found;
+		list_for_each_entry_continue_rcu(found, bucket, hash_list) {
+			goto out;
+		}
+	}
+
+	/* No more stacks in this bucket, check the next one */
+	nr_table++;
+	if (nr_table < stack_table_entries)
+		goto new_table;
+
+	/* We are done walking all buckets */
+	found = NULL;
+
+out:
+	*table = nr_table;
+	*curr_bucket = bucket;
+	*last_found = found;
+	rcu_read_unlock_sched_notrace();
+
+	return found;
+}
+
 static int stats_show(struct seq_file *seq, void *v)
 {
 	/*
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 0adf41702b9d..aea212734557 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -749,6 +749,89 @@  static const struct file_operations proc_page_owner_operations = {
 	.llseek		= lseek_page_owner,
 };
 
+struct stack_iterator {
+	unsigned long nr_table;
+	struct list_head *bucket;
+	struct stack_record *last_stack;
+};
+
+static void *stack_start(struct seq_file *m, loff_t *ppos)
+{
+	struct stack_iterator *iter = m->private;
+
+	if (*ppos == -1UL)
+		return NULL;
+
+	return stack_depot_get_next_stack(&iter->nr_table,
+					  &iter->bucket,
+					  &iter->last_stack);
+}
+
+static void *stack_next(struct seq_file *m, void *v, loff_t *ppos)
+{
+	struct stack_iterator *iter = m->private;
+	struct stack_record *stack;
+
+	stack = stack_depot_get_next_stack(&iter->nr_table,
+					   &iter->bucket,
+					   &iter->last_stack);
+	*ppos = stack ? *ppos + 1 : -1UL;
+
+	return stack;
+}
+
+static int stack_print(struct seq_file *m, void *v)
+{
+	char *buf;
+	int ret = 0;
+	struct stack_iterator *iter = m->private;
+	struct stack_record *stack = iter->last_stack;
+
+	if (!stack->size || stack->size < 0 || refcount_read(&stack->count) < 2)
+		return 0;
+
+	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+
+	ret += stack_trace_snprint(buf, PAGE_SIZE, stack->entries, stack->size,
+				   0);
+	if (!ret)
+		goto out;
+
+	scnprintf(buf + ret, PAGE_SIZE - ret, "stack_count: %d\n\n",
+		  refcount_read(&stack->count));
+
+	seq_puts(m, buf);
+	seq_puts(m, "\n\n");
+out:
+	kfree(buf);
+
+	return 0;
+}
+
+static void stack_stop(struct seq_file *m, void *v)
+{
+}
+
+static const struct seq_operations page_owner_stack_op = {
+	.start	= stack_start,
+	.next	= stack_next,
+	.stop	= stack_stop,
+	.show	= stack_print
+};
+
+static int page_owner_stack_open(struct inode *inode, struct file *file)
+{
+	return seq_open_private(file, &page_owner_stack_op,
+				sizeof(struct stack_iterator));
+}
+
+const struct file_operations page_owner_stack_operations = {
+	.open		= page_owner_stack_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
 static int __init pageowner_init(void)
 {
 	if (!static_branch_unlikely(&page_owner_inited)) {
@@ -758,6 +841,8 @@  static int __init pageowner_init(void)
 
 	debugfs_create_file("page_owner", 0400, NULL, NULL,
 			    &proc_page_owner_operations);
+	debugfs_create_file("page_owner_stacks", 0400, NULL, NULL,
+			    &page_owner_stack_operations);
 
 	return 0;
 }