
[mm,v2,30/33] kasan: implement stack ring for tag-based modes

Message ID 0e910197bfbcf505122f6dae2ee9b90ff8ee31f7.1658189199.git.andreyknvl@google.com (mailing list archive)
State New
Series kasan: switch tag-based modes to stack ring from per-object metadata

Commit Message

andrey.konovalov@linux.dev July 19, 2022, 12:10 a.m. UTC
From: Andrey Konovalov <andreyknvl@google.com>

Implement storing stack depot handles for alloc/free stack traces of
slab objects for the tag-based KASAN modes in a ring buffer.

This ring buffer is referred to as the stack ring.

On each alloc/free of a slab object, the tagged address of the object and
the current stack trace are recorded in the stack ring.

On each bug report, if the accessed address belongs to a slab object, the
stack ring is scanned for matching entries. The newest entries are used to
print the alloc/free stack traces in the report: one entry for alloc and
one for free.

The number of entries in the stack ring is fixed in this patch, but one of
the following patches adds a command-line argument to control it.

Signed-off-by: Andrey Konovalov <andreyknvl@google.com>

---

Changes v1->v2:
- Only use the atomic type for pos; use READ/WRITE_ONCE() for the rest.
- Rename KASAN_STACK_RING_ENTRIES to KASAN_STACK_RING_SIZE.
- Rename object local variable in kasan_complete_mode_report_info() to
  ptr to match the name in kasan_stack_ring_entry.
- Detect stack ring entry slots that are being written to.
- Use read-write lock to disallow reading half-written stack ring entries.
- Add a comment about the stack ring being best-effort.
---
 mm/kasan/kasan.h       | 21 ++++++++++++
 mm/kasan/report_tags.c | 76 ++++++++++++++++++++++++++++++++++++++++++
 mm/kasan/tags.c        | 50 +++++++++++++++++++++++++++
 3 files changed, 147 insertions(+)
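
In essence, each alloc/free claims the next slot at an atomically
incremented position, and a report scans the ring backwards from the
newest entry. A simplified userspace sketch of the idea (an
illustration only, not the kernel code; the actual implementation
below adds the busy-slot protocol, the read-write lock, and tag
checks):

    /* Simplified model of the stack ring; see the patch for the real code. */
    #define RING_SIZE (32 << 10)

    struct ring_entry {
            void *ptr;              /* tagged object address */
            unsigned long stack;    /* stack depot handle */
    };

    static struct ring_entry ring[RING_SIZE];
    static unsigned long long ring_pos;     /* atomic64_t in the kernel */

    static void ring_record(void *ptr, unsigned long stack)
    {
            struct ring_entry *e = &ring[ring_pos++ % RING_SIZE];

            e->ptr = ptr;
            e->stack = stack;
    }

    /* Scan backwards so that the newest matching entry wins. */
    static unsigned long ring_find(void *ptr)
    {
            unsigned long long pos = ring_pos;
            unsigned long long i;

            for (i = pos - 1; i != pos - 1 - RING_SIZE; i--) {
                    struct ring_entry *e = &ring[i % RING_SIZE];

                    if (e->ptr == ptr)
                            return e->stack;
            }
            return 0;
    }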

Comments

Marco Elver July 19, 2022, 11:41 a.m. UTC | #1
On Tue, 19 Jul 2022 at 02:15, <andrey.konovalov@linux.dev> wrote:
>
> From: Andrey Konovalov <andreyknvl@google.com>
>
> Implement storing stack depot handles for alloc/free stack traces of
> slab objects for the tag-based KASAN modes in a ring buffer.
>
> This ring buffer is referred to as the stack ring.
>
> On each alloc/free of a slab object, the tagged address of the object and
> the current stack trace are recorded in the stack ring.
>
> On each bug report, if the accessed address belongs to a slab object, the
> stack ring is scanned for matching entries. The newest entries are used to
> print the alloc/free stack traces in the report: one entry for alloc and
> one for free.
>
> The number of entries in the stack ring is fixed in this patch, but one of
> the following patches adds a command-line argument to control it.
>
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
>
> ---
>
> Changes v1->v2:
> - Only use the atomic type for pos; use READ/WRITE_ONCE() for the rest.
> - Rename KASAN_STACK_RING_ENTRIES to KASAN_STACK_RING_SIZE.
> - Rename object local variable in kasan_complete_mode_report_info() to
>   ptr to match the name in kasan_stack_ring_entry.
> - Detect stack ring entry slots that are being written to.
> - Use read-write lock to disallow reading half-written stack ring entries.
> - Add a comment about the stack ring being best-effort.
> ---
>  mm/kasan/kasan.h       | 21 ++++++++++++
>  mm/kasan/report_tags.c | 76 ++++++++++++++++++++++++++++++++++++++++++
>  mm/kasan/tags.c        | 50 +++++++++++++++++++++++++++
>  3 files changed, 147 insertions(+)
>
> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index 7df107dc400a..cfff81139d67 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -2,6 +2,7 @@
>  #ifndef __MM_KASAN_KASAN_H
>  #define __MM_KASAN_KASAN_H
>
> +#include <linux/atomic.h>
>  #include <linux/kasan.h>
>  #include <linux/kasan-tags.h>
>  #include <linux/kfence.h>
> @@ -233,6 +234,26 @@ struct kasan_free_meta {
>
>  #endif /* CONFIG_KASAN_GENERIC */
>
> +#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
> +
> +struct kasan_stack_ring_entry {
> +       void *ptr;
> +       size_t size;
> +       u32 pid;
> +       depot_stack_handle_t stack;
> +       bool is_free;
> +};
> +
> +#define KASAN_STACK_RING_SIZE (32 << 10)
> +
> +struct kasan_stack_ring {
> +       rwlock_t lock;
> +       atomic64_t pos;
> +       struct kasan_stack_ring_entry entries[KASAN_STACK_RING_SIZE];
> +};
> +
> +#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
> +
>  #if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
>  /* Used in KUnit-compatible KASAN tests. */
>  struct kunit_kasan_status {
> diff --git a/mm/kasan/report_tags.c b/mm/kasan/report_tags.c
> index 5cbac2cdb177..a996489e6dac 100644
> --- a/mm/kasan/report_tags.c
> +++ b/mm/kasan/report_tags.c
> @@ -4,8 +4,12 @@
>   * Copyright (c) 2020 Google, Inc.
>   */
>
> +#include <linux/atomic.h>
> +
>  #include "kasan.h"
>
> +extern struct kasan_stack_ring stack_ring;
> +
>  static const char *get_bug_type(struct kasan_report_info *info)
>  {
>         /*
> @@ -24,5 +28,77 @@ static const char *get_bug_type(struct kasan_report_info *info)
>
>  void kasan_complete_mode_report_info(struct kasan_report_info *info)
>  {
> +       unsigned long flags;
> +       u64 pos;
> +       struct kasan_stack_ring_entry *entry;
> +       void *ptr;
> +       u32 pid;
> +       depot_stack_handle_t stack;
> +       bool is_free;
> +       bool alloc_found = false, free_found = false;
> +
>         info->bug_type = get_bug_type(info);
> +
> +       if (!info->cache || !info->object) {
> +               return;
> +       }
> +
> +       write_lock_irqsave(&stack_ring.lock, flags);
> +
> +       pos = atomic64_read(&stack_ring.pos);
> +
> +       /*
> +        * The loop below tries to find stack ring entries relevant to the
> +        * buggy object. This is a best-effort process.
> +        *
> +        * First, another object with the same tag can be allocated in place of
> +        * the buggy object. Also, since the number of entries is limited, the
> +        * entries relevant to the buggy object can be overwritten.
> +        */
> +
> +       for (u64 i = pos - 1; i != pos - 1 - KASAN_STACK_RING_SIZE; i--) {
> +               if (alloc_found && free_found)
> +                       break;
> +
> +               entry = &stack_ring.entries[i % KASAN_STACK_RING_SIZE];
> +
> +               /* Paired with smp_store_release() in save_stack_info(). */
> +               ptr = (void *)smp_load_acquire(&entry->ptr);
> +
> +               if (kasan_reset_tag(ptr) != info->object ||
> +                   get_tag(ptr) != get_tag(info->access_addr))
> +                       continue;
> +
> +               pid = READ_ONCE(entry->pid);
> +               stack = READ_ONCE(entry->stack);
> +               is_free = READ_ONCE(entry->is_free);
> +
> +               /* Try detecting if the entry was changed while being read. */
> +               smp_mb();
> +               if (ptr != (void *)READ_ONCE(entry->ptr))
> +                       continue;

I thought the re-validation is no longer needed because of the rwlock
protection?

The rest looks fine now.

> +               if (is_free) {
> +                       /*
> +                        * Second free of the same object.
> +                        * Give up on trying to find the alloc entry.
> +                        */
> +                       if (free_found)
> +                               break;
> +
> +                       info->free_track.pid = pid;
> +                       info->free_track.stack = stack;
> +                       free_found = true;
> +               } else {
> +                       /* Second alloc of the same object. Give up. */
> +                       if (alloc_found)
> +                               break;
> +
> +                       info->alloc_track.pid = pid;
> +                       info->alloc_track.stack = stack;
> +                       alloc_found = true;
> +               }
> +       }
> +
> +       write_unlock_irqrestore(&stack_ring.lock, flags);
>  }
> diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c
> index 39a0481e5228..07828021c1f5 100644
> --- a/mm/kasan/tags.c
> +++ b/mm/kasan/tags.c
> @@ -6,6 +6,7 @@
>   * Copyright (c) 2020 Google, Inc.
>   */
>
> +#include <linux/atomic.h>
>  #include <linux/init.h>
>  #include <linux/kasan.h>
>  #include <linux/kernel.h>
> @@ -16,11 +17,60 @@
>  #include <linux/types.h>
>
>  #include "kasan.h"
> +#include "../slab.h"
> +
> +/* Non-zero, as initial pointer values are 0. */
> +#define STACK_RING_BUSY_PTR ((void *)1)
> +
> +struct kasan_stack_ring stack_ring;
> +
> +static void save_stack_info(struct kmem_cache *cache, void *object,
> +                       gfp_t gfp_flags, bool is_free)
> +{
> +       unsigned long flags;
> +       depot_stack_handle_t stack;
> +       u64 pos;
> +       struct kasan_stack_ring_entry *entry;
> +       void *old_ptr;
> +
> +       stack = kasan_save_stack(gfp_flags, true);
> +
> +       /*
> +        * Prevent save_stack_info() from modifying stack ring
> +        * when kasan_complete_mode_report_info() is walking it.
> +        */
> +       read_lock_irqsave(&stack_ring.lock, flags);
> +
> +next:
> +       pos = atomic64_fetch_add(1, &stack_ring.pos);
> +       entry = &stack_ring.entries[pos % KASAN_STACK_RING_SIZE];
> +
> +       /* Detect stack ring entry slots that are being written to. */
> +       old_ptr = READ_ONCE(entry->ptr);
> +       if (old_ptr == STACK_RING_BUSY_PTR)
> +               goto next; /* Busy slot. */
> +       if (!try_cmpxchg(&entry->ptr, &old_ptr, STACK_RING_BUSY_PTR))
> +               goto next; /* Busy slot. */
> +
> +       WRITE_ONCE(entry->size, cache->object_size);
> +       WRITE_ONCE(entry->pid, current->pid);
> +       WRITE_ONCE(entry->stack, stack);
> +       WRITE_ONCE(entry->is_free, is_free);
> +
> +       /*
> +        * Paired with smp_load_acquire() in kasan_complete_mode_report_info().
> +        */
> +       smp_store_release(&entry->ptr, (void *)object);
> +
> +       read_unlock_irqrestore(&stack_ring.lock, flags);
> +}
>
>  void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
>  {
> +       save_stack_info(cache, object, flags, false);
>  }
>
>  void kasan_save_free_info(struct kmem_cache *cache, void *object)
>  {
> +       save_stack_info(cache, object, GFP_NOWAIT, true);
>  }
> --
> 2.25.1
>
Andrey Konovalov July 21, 2022, 8:41 p.m. UTC | #2
On Tue, Jul 19, 2022 at 1:41 PM Marco Elver <elver@google.com> wrote:
>
> > +       for (u64 i = pos - 1; i != pos - 1 - KASAN_STACK_RING_SIZE; i--) {
> > +               if (alloc_found && free_found)
> > +                       break;
> > +
> > +               entry = &stack_ring.entries[i % KASAN_STACK_RING_SIZE];
> > +
> > +               /* Paired with smp_store_release() in save_stack_info(). */
> > +               ptr = (void *)smp_load_acquire(&entry->ptr);
> > +
> > +               if (kasan_reset_tag(ptr) != info->object ||
> > +                   get_tag(ptr) != get_tag(info->access_addr))
> > +                       continue;
> > +
> > +               pid = READ_ONCE(entry->pid);
> > +               stack = READ_ONCE(entry->stack);
> > +               is_free = READ_ONCE(entry->is_free);
> > +
> > +               /* Try detecting if the entry was changed while being read. */
> > +               smp_mb();
> > +               if (ptr != (void *)READ_ONCE(entry->ptr))
> > +                       continue;
>
> I thought the re-validation is no longer needed because of the rwlock
> protection?

Oh, yes, forgot to remove this. Will either do in v3 if there are more
things to fix, or will just send a small fix-up patch if the rest of
the series looks good.

> The rest looks fine now.

Thank you, Marco!
Andrey Konovalov Aug. 2, 2022, 8:45 p.m. UTC | #3
On Thu, Jul 21, 2022 at 10:41 PM Andrey Konovalov <andreyknvl@gmail.com> wrote:
>
> On Tue, Jul 19, 2022 at 1:41 PM Marco Elver <elver@google.com> wrote:
> >
> > > +       for (u64 i = pos - 1; i != pos - 1 - KASAN_STACK_RING_SIZE; i--) {
> > > +               if (alloc_found && free_found)
> > > +                       break;
> > > +
> > > +               entry = &stack_ring.entries[i % KASAN_STACK_RING_SIZE];
> > > +
> > > +               /* Paired with smp_store_release() in save_stack_info(). */
> > > +               ptr = (void *)smp_load_acquire(&entry->ptr);
> > > +
> > > +               if (kasan_reset_tag(ptr) != info->object ||
> > > +                   get_tag(ptr) != get_tag(info->access_addr))
> > > +                       continue;
> > > +
> > > +               pid = READ_ONCE(entry->pid);
> > > +               stack = READ_ONCE(entry->stack);
> > > +               is_free = READ_ONCE(entry->is_free);
> > > +
> > > +               /* Try detecting if the entry was changed while being read. */
> > > +               smp_mb();
> > > +               if (ptr != (void *)READ_ONCE(entry->ptr))
> > > +                       continue;
> >
> > I thought the re-validation is no longer needed because of the rwlock
> > protection?
>
> Oh, yes, forgot to remove this. Will either do in v3 if there are more
> things to fix, or will just send a small fix-up patch if the rest of
> the series looks good.
>
> > The rest looks fine now.
>
> Thank you, Marco!

Hi Marco,

I'm thinking of sending a v3.

Does your "The rest looks fine now" comment refer only to this patch
or to the whole series? If it's the former, could you PTAL at the
other patches?

Thanks!
Marco Elver Aug. 3, 2022, 8:28 p.m. UTC | #4
On Tue, 2 Aug 2022 at 22:45, Andrey Konovalov <andreyknvl@gmail.com> wrote:
>
> On Thu, Jul 21, 2022 at 10:41 PM Andrey Konovalov <andreyknvl@gmail.com> wrote:
> >
> > On Tue, Jul 19, 2022 at 1:41 PM Marco Elver <elver@google.com> wrote:
> > >
> > > > +       for (u64 i = pos - 1; i != pos - 1 - KASAN_STACK_RING_SIZE; i--) {
> > > > +               if (alloc_found && free_found)
> > > > +                       break;
> > > > +
> > > > +               entry = &stack_ring.entries[i % KASAN_STACK_RING_SIZE];
> > > > +
> > > > +               /* Paired with smp_store_release() in save_stack_info(). */
> > > > +               ptr = (void *)smp_load_acquire(&entry->ptr);
> > > > +
> > > > +               if (kasan_reset_tag(ptr) != info->object ||
> > > > +                   get_tag(ptr) != get_tag(info->access_addr))
> > > > +                       continue;
> > > > +
> > > > +               pid = READ_ONCE(entry->pid);
> > > > +               stack = READ_ONCE(entry->stack);
> > > > +               is_free = READ_ONCE(entry->is_free);
> > > > +
> > > > +               /* Try detecting if the entry was changed while being read. */
> > > > +               smp_mb();
> > > > +               if (ptr != (void *)READ_ONCE(entry->ptr))
> > > > +                       continue;
> > >
> > > I thought the re-validation is no longer needed because of the rwlock
> > > protection?
> >
> > Oh, yes, forgot to remove this. Will either do in v3 if there are more
> > things to fix, or will just send a small fix-up patch if the rest of
> > the series looks good.
> >
> > > The rest looks fine now.
> >
> > Thank you, Marco!
>
> Hi Marco,
>
> I'm thinking of sending a v3.
>
> Does your "The rest looks fine now" comment refer only to this patch
> or to the whole series? If it's the former, could you PTAL at the
> other patches?

I just looked again. Apart from the comments I just sent, overall it
looks fine (whole series).

Does test_kasan exercise the ring wrapping around? One thing that
might be worth doing is adding a multi-threaded stress test, where you
have 2+ threads doing lots of allocations, frees, and generating
reports.
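
Roughly, something like the following (just a sketch: the names, object
size, thread count, and duration are arbitrary, and kthread_run() error
handling is omitted):

    #include <kunit/test.h>
    #include <linux/delay.h>
    #include <linux/kernel.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/slab.h>

    /* Worker: allocate and free in a loop so the stack ring wraps around. */
    static int stack_ring_stress_fn(void *data)
    {
            while (!kthread_should_stop()) {
                    void *p = kmalloc(128, GFP_KERNEL);

                    kfree(p);
                    cond_resched();
            }
            return 0;
    }

    static void stack_ring_stress(struct kunit *test)
    {
            struct task_struct *threads[4];
            int i;

            for (i = 0; i < ARRAY_SIZE(threads); i++)
                    threads[i] = kthread_run(stack_ring_stress_fn, NULL,
                                             "kasan_stress_%d", i);

            /* Let the threads hammer the ring for a while. */
            msleep(1000);

            for (i = 0; i < ARRAY_SIZE(threads); i++)
                    kthread_stop(threads[i]);
    }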
Andrey Konovalov Sept. 5, 2022, 8:40 p.m. UTC | #5
On Wed, Aug 3, 2022 at 10:29 PM Marco Elver <elver@google.com> wrote:
>
> > Does your "The rest looks fine now" comment refer only to this patch
> > or to the whole series? If it's the former, could you PTAL at the
> > other patches?
>
> I just looked again. Apart from the comments I just sent, overall it
> looks fine (whole series).

Great, thanks! I'll put your Reviewed-by on all patches except the
ones I will change in v3.

> Does test_kasan exercise the ring wrapping around? One thing that
> might be worth doing is adding a multi-threaded stress test, where you
> have 2+ threads doing lots of allocations, frees, and generating
> reports.

There's probably not a lot of sense in adding this test: this part is
tested during kernel boot. Even with defconfig, the stack ring
overflows multiple times.

I will, however, add a test for a complicated use-after-free scenario
to make sure that KASAN points at the right kmalloc/kfree calls.
Until I get to implementing [1], though, the report contents will have
to be checked manually.

Thanks!

[1] https://bugzilla.kernel.org/show_bug.cgi?id=212203
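
The rough shape I have in mind (a sketch only, not the final test;
KUNIT_EXPECT_KASAN_FAIL is the helper the existing KASAN KUnit tests
use, and the sketch assumes it is added next to them):

    /*
     * Free the object through a separate (noinline) function, so the
     * report has to attribute the free to the right stack trace, then
     * trigger a use-after-free. Until [1] is implemented, the printed
     * alloc/free stacks have to be checked by hand.
     */
    static noinline void kfree_via_helper(void *ptr)
    {
            kfree(ptr);
    }

    static void uaf_stack_attribution(struct kunit *test)
    {
            char *ptr = kmalloc(128, GFP_KERNEL);

            KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

            kfree_via_helper(ptr);

            /* Alloc stack should point here; free stack to kfree_via_helper(). */
            KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
    }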

Patch

diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 7df107dc400a..cfff81139d67 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -2,6 +2,7 @@ 
 #ifndef __MM_KASAN_KASAN_H
 #define __MM_KASAN_KASAN_H
 
+#include <linux/atomic.h>
 #include <linux/kasan.h>
 #include <linux/kasan-tags.h>
 #include <linux/kfence.h>
@@ -233,6 +234,26 @@  struct kasan_free_meta {
 
 #endif /* CONFIG_KASAN_GENERIC */
 
+#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
+
+struct kasan_stack_ring_entry {
+	void *ptr;
+	size_t size;
+	u32 pid;
+	depot_stack_handle_t stack;
+	bool is_free;
+};
+
+#define KASAN_STACK_RING_SIZE (32 << 10)
+
+struct kasan_stack_ring {
+	rwlock_t lock;
+	atomic64_t pos;
+	struct kasan_stack_ring_entry entries[KASAN_STACK_RING_SIZE];
+};
+
+#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
+
 #if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
 /* Used in KUnit-compatible KASAN tests. */
 struct kunit_kasan_status {
diff --git a/mm/kasan/report_tags.c b/mm/kasan/report_tags.c
index 5cbac2cdb177..a996489e6dac 100644
--- a/mm/kasan/report_tags.c
+++ b/mm/kasan/report_tags.c
@@ -4,8 +4,12 @@ 
  * Copyright (c) 2020 Google, Inc.
  */
 
+#include <linux/atomic.h>
+
 #include "kasan.h"
 
+extern struct kasan_stack_ring stack_ring;
+
 static const char *get_bug_type(struct kasan_report_info *info)
 {
 	/*
@@ -24,5 +28,77 @@  static const char *get_bug_type(struct kasan_report_info *info)
 
 void kasan_complete_mode_report_info(struct kasan_report_info *info)
 {
+	unsigned long flags;
+	u64 pos;
+	struct kasan_stack_ring_entry *entry;
+	void *ptr;
+	u32 pid;
+	depot_stack_handle_t stack;
+	bool is_free;
+	bool alloc_found = false, free_found = false;
+
 	info->bug_type = get_bug_type(info);
+
+	if (!info->cache || !info->object) {
+		return;
+	}
+
+	write_lock_irqsave(&stack_ring.lock, flags);
+
+	pos = atomic64_read(&stack_ring.pos);
+
+	/*
+	 * The loop below tries to find stack ring entries relevant to the
+	 * buggy object. This is a best-effort process.
+	 *
+	 * First, another object with the same tag can be allocated in place of
+	 * the buggy object. Also, since the number of entries is limited, the
+	 * entries relevant to the buggy object can be overwritten.
+	 */
+
+	for (u64 i = pos - 1; i != pos - 1 - KASAN_STACK_RING_SIZE; i--) {
+		if (alloc_found && free_found)
+			break;
+
+		entry = &stack_ring.entries[i % KASAN_STACK_RING_SIZE];
+
+		/* Paired with smp_store_release() in save_stack_info(). */
+		ptr = (void *)smp_load_acquire(&entry->ptr);
+
+		if (kasan_reset_tag(ptr) != info->object ||
+		    get_tag(ptr) != get_tag(info->access_addr))
+			continue;
+
+		pid = READ_ONCE(entry->pid);
+		stack = READ_ONCE(entry->stack);
+		is_free = READ_ONCE(entry->is_free);
+
+		/* Try detecting if the entry was changed while being read. */
+		smp_mb();
+		if (ptr != (void *)READ_ONCE(entry->ptr))
+			continue;
+
+		if (is_free) {
+			/*
+			 * Second free of the same object.
+			 * Give up on trying to find the alloc entry.
+			 */
+			if (free_found)
+				break;
+
+			info->free_track.pid = pid;
+			info->free_track.stack = stack;
+			free_found = true;
+		} else {
+			/* Second alloc of the same object. Give up. */
+			if (alloc_found)
+				break;
+
+			info->alloc_track.pid = pid;
+			info->alloc_track.stack = stack;
+			alloc_found = true;
+		}
+	}
+
+	write_unlock_irqrestore(&stack_ring.lock, flags);
 }
diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c
index 39a0481e5228..07828021c1f5 100644
--- a/mm/kasan/tags.c
+++ b/mm/kasan/tags.c
@@ -6,6 +6,7 @@ 
  * Copyright (c) 2020 Google, Inc.
  */
 
+#include <linux/atomic.h>
 #include <linux/init.h>
 #include <linux/kasan.h>
 #include <linux/kernel.h>
@@ -16,11 +17,60 @@ 
 #include <linux/types.h>
 
 #include "kasan.h"
+#include "../slab.h"
+
+/* Non-zero, as initial pointer values are 0. */
+#define STACK_RING_BUSY_PTR ((void *)1)
+
+struct kasan_stack_ring stack_ring;
+
+static void save_stack_info(struct kmem_cache *cache, void *object,
+			gfp_t gfp_flags, bool is_free)
+{
+	unsigned long flags;
+	depot_stack_handle_t stack;
+	u64 pos;
+	struct kasan_stack_ring_entry *entry;
+	void *old_ptr;
+
+	stack = kasan_save_stack(gfp_flags, true);
+
+	/*
+	 * Prevent save_stack_info() from modifying stack ring
+	 * when kasan_complete_mode_report_info() is walking it.
+	 */
+	read_lock_irqsave(&stack_ring.lock, flags);
+
+next:
+	pos = atomic64_fetch_add(1, &stack_ring.pos);
+	entry = &stack_ring.entries[pos % KASAN_STACK_RING_SIZE];
+
+	/* Detect stack ring entry slots that are being written to. */
+	old_ptr = READ_ONCE(entry->ptr);
+	if (old_ptr == STACK_RING_BUSY_PTR)
+		goto next; /* Busy slot. */
+	if (!try_cmpxchg(&entry->ptr, &old_ptr, STACK_RING_BUSY_PTR))
+		goto next; /* Busy slot. */
+
+	WRITE_ONCE(entry->size, cache->object_size);
+	WRITE_ONCE(entry->pid, current->pid);
+	WRITE_ONCE(entry->stack, stack);
+	WRITE_ONCE(entry->is_free, is_free);
+
+	/*
+	 * Paired with smp_load_acquire() in kasan_complete_mode_report_info().
+	 */
+	smp_store_release(&entry->ptr, (void *)object);
+
+	read_unlock_irqrestore(&stack_ring.lock, flags);
+}
 
 void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
 {
+	save_stack_info(cache, object, flags, false);
 }
 
 void kasan_save_free_info(struct kmem_cache *cache, void *object)
 {
+	save_stack_info(cache, object, GFP_NOWAIT, true);
 }