kasan: Remove kasan_record_aux_stack_noalloc().

Message ID 20241119155701.GYennzPF@linutronix.de (mailing list archive)
State New
Series: kasan: Remove kasan_record_aux_stack_noalloc().

Commit Message

Sebastian Andrzej Siewior Nov. 19, 2024, 3:57 p.m. UTC
From: Peter Zijlstra <peterz@infradead.org>

kasan_record_aux_stack_noalloc() was introduced to record a stack trace
without allocating memory in the process. It has been added to callers
which were invoked while a raw_spinlock_t was held.
More and more callers were identified and changed over time. Is it a
good thing to have this variant while the functions try their best to
do a lockless setup? The only downside of having kasan_record_aux_stack()
not allocate any memory is that we end up without a stacktrace if
stackdepot runs out of memory and the same stacktrace was not
recorded before. Marco Elver said in
	https://lore.kernel.org/all/20210913112609.2651084-1-elver@google.com/
that this is rare.

Make the kasan_record_aux_stack_noalloc() behaviour the default for
kasan_record_aux_stack().

[bigeasy: Dressed the diff as a patch.]

Reported-by: syzbot+39f85d612b7c20d8db48@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/all/67275485.050a0220.3c8d68.0a37.GAE@google.com
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---

Didn't add a Fixes tag, didn't want to put 
   7cb3007ce2da2 ("kasan: generic: introduce kasan_record_aux_stack_noalloc()")

there.

 include/linux/kasan.h     |  2 --
 include/linux/task_work.h |  3 ---
 kernel/irq_work.c         |  2 +-
 kernel/rcu/tiny.c         |  2 +-
 kernel/rcu/tree.c         |  4 ++--
 kernel/sched/core.c       |  2 +-
 kernel/task_work.c        | 14 +-------------
 kernel/workqueue.c        |  2 +-
 mm/kasan/generic.c        | 14 ++------------
 mm/slub.c                 |  2 +-
 10 files changed, 10 insertions(+), 37 deletions(-)
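
At a glance, the patch collapses __kasan_record_aux_stack() and its two
wrappers into a single kasan_record_aux_stack() that always passes
depot_flags == 0, i.e. never STACK_DEPOT_FLAG_CAN_ALLOC. A sketch of the
resulting function follows; the middle of the body is not shown in the
diff hunks and is filled in from mainline for illustration only:

	void kasan_record_aux_stack(void *addr)
	{
		struct slab *slab = kasan_addr_to_slab(addr);
		struct kmem_cache *cache;
		struct kasan_alloc_meta *alloc_meta;
		void *object;

		if (is_kfence_address(addr) || !slab)
			return;

		cache = slab->slab_cache;
		object = nearest_obj(cache, slab, addr);
		alloc_meta = kasan_get_alloc_meta(cache, object);
		if (!alloc_meta)
			return;

		/* Shift the previous aux stack down, record the new one. */
		alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
		/*
		 * depot_flags == 0: stackdepot must not allocate. If its
		 * pool is exhausted and this stack was never recorded
		 * before, the trace is silently dropped.
		 */
		alloc_meta->aux_stack[0] = kasan_save_stack(0, 0);
	}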

Comments

Waiman Long Nov. 19, 2024, 4:22 p.m. UTC | #1
On 11/19/24 10:57 AM, Sebastian Andrzej Siewior wrote:
> From: Peter Zijlstra <peterz@infradead.org>
>
> kasan_record_aux_stack_noalloc() was introduced to record a stack trace
> without allocating memory in the process. It has been added to callers
> which were invoked while a raw_spinlock_t was held.
> More and more callers were identified and changed over time. Is it a
> good thing to have this variant while the functions try their best to
> do a lockless setup? The only downside of having kasan_record_aux_stack()
> not allocate any memory is that we end up without a stacktrace if
> stackdepot runs out of memory and the same stacktrace was not
> recorded before. Marco Elver said in
> 	https://lore.kernel.org/all/20210913112609.2651084-1-elver@google.com/
> that this is rare.
>
> Make the kasan_record_aux_stack_noalloc() behaviour the default for
> kasan_record_aux_stack().
>
> [bigeasy: Dressed the diff as a patch.]
>
> Reported-by: syzbot+39f85d612b7c20d8db48@syzkaller.appspotmail.com
> Closes: https://lore.kernel.org/all/67275485.050a0220.3c8d68.0a37.GAE@google.com
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
> ---
>
> Didn't add a Fixes tag, didn't want to put
>     7cb3007ce2da2 ("kasan: generic: introduce kasan_record_aux_stack_noalloc()")
>
> there.

Right now task_work_add() is the only caller of
kasan_record_aux_stack(). So this essentially makes all its callers use
the noalloc version of kasan_record_aux_stack().

Acked-by: Waiman Long <longman@redhat.com>

Marco Elver Nov. 19, 2024, 4:37 p.m. UTC | #2
On Tue, 19 Nov 2024 at 16:57, Sebastian Andrzej Siewior
<bigeasy@linutronix.de> wrote:
>
> From: Peter Zijlstra <peterz@infradead.org>

The patch title is misleading - it might suggest the opposite of what
it's doing. I think this might be clearer:

"kasan: Make kasan_record_aux_stack_noalloc() the default behaviour"

Which is also more or less what you say below.

> kasan_record_aux_stack_noalloc() was introduced to record a stack trace
> without allocating memory in the process. It has been added to callers
> which were invoked while a raw_spinlock_t was held.
> More and more callers were identified and changed over time. Is it a
> good thing to have this variant while the functions try their best to
> do a lockless setup? The only downside of having kasan_record_aux_stack()
> not allocate any memory is that we end up without a stacktrace if
> stackdepot runs out of memory and the same stacktrace was not
> recorded before. Marco Elver said in
>         https://lore.kernel.org/all/20210913112609.2651084-1-elver@google.com/
> that this is rare.
>
> Make the kasan_record_aux_stack_noalloc() behaviour the default for
> kasan_record_aux_stack().
>
> [bigeasy: Dressed the diff as a patch.]
>
> Reported-by: syzbot+39f85d612b7c20d8db48@syzkaller.appspotmail.com
> Closes: https://lore.kernel.org/all/67275485.050a0220.3c8d68.0a37.GAE@google.com
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

Reviewed-by: Marco Elver <elver@google.com>

As I wrote in https://lore.kernel.org/all/CANpmjNPmQYJ7pv1N3cuU8cP18u7PP_uoZD8YxwZd4jtbof9nVQ@mail.gmail.com/:

> I'd be in favor, it simplifies things. And stack depot should be
> able to replenish its pool sufficiently in the "non-aux" cases
> i.e. regular allocations. Worst case we fail to record some
> aux stacks, but I think that's only really bad if there's a bug
> around one of these allocations. In general the probabilities
> of this being a regression are extremely small [...]

Good riddance.

Thanks,
-- Marco

Andrey Konovalov Nov. 19, 2024, 7:36 p.m. UTC | #3
On Tue, Nov 19, 2024 at 4:57 PM Sebastian Andrzej Siewior
<bigeasy@linutronix.de> wrote:
>
> From: Peter Zijlstra <peterz@infradead.org>
>
> …
>
> diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> index 6310a180278b6..b18b5944997f8 100644
> --- a/mm/kasan/generic.c
> +++ b/mm/kasan/generic.c
> @@ -521,7 +521,7 @@ size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
>                         sizeof(struct kasan_free_meta) : 0);
>  }
>
> -static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)

Could you add a comment here that notes the usage, something like:

"This function avoids dynamic memory allocations and thus can be
called from contexts that do not allow allocating memory."

> +void kasan_record_aux_stack(void *addr)
>  {
> …

Otherwise,

Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>

Thank you!
Sebastian Andrzej Siewior Nov. 22, 2024, 11:32 a.m. UTC | #4
On 2024-11-19 20:36:56 [+0100], Andrey Konovalov wrote:
> > diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> > index 6310a180278b6..b18b5944997f8 100644
> > --- a/mm/kasan/generic.c
> > +++ b/mm/kasan/generic.c
> > @@ -521,7 +521,7 @@ size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
> >                         sizeof(struct kasan_free_meta) : 0);
> >  }
> >
> > -static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
> 
> Could you add a comment here that notes the usage, something like:
> 
> "This function avoids dynamic memory allocations and thus can be
> called from contexts that do not allow allocating memory."
> 
> > +void kasan_record_aux_stack(void *addr)
> >  {
…
Added, but I would prefer to add a pointer to stack_depot_save_flags(),
which has this Context: paragraph. Would that work?
Now looking at it, it says:
|  * Context: Any context, but setting STACK_DEPOT_FLAG_CAN_ALLOC is required if
|  *          alloc_pages() cannot be used from the current context. Currently
|  *          this is the case for contexts where neither %GFP_ATOMIC nor
|  *          %GFP_NOWAIT can be used (NMI, raw_spin_lock).

If I understand this correctly, then STACK_DEPOT_FLAG_CAN_ALLOC must not
be specified if invoked from NMI. This will stop
stack_depot_save_flags() from allocating memory, but the function will
still acquire pool_lock, right?
Do we need to update the comment saying that it must not be used from
NMI or do we make it jump over the locked section in the NMI case?
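
Concretely, the interleaving that worries me is a self-deadlock on a
single CPU (a sketch of my understanding, not code from the patch):

	/* task context */			/* NMI on the same CPU */
	stack_depot_save_flags()
	  raw_spin_lock_irqsave(&pool_lock)
	  <NMI fires>				stack_depot_save_flags()
						  raw_spin_lock_irqsave(&pool_lock)
						  /* spins forever: the lock
						     holder cannot resume until
						     the NMI returns */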

Sebastian
Marco Elver Nov. 22, 2024, 3:01 p.m. UTC | #5
On Fri, Nov 22, 2024 at 12:32PM +0100, Sebastian Andrzej Siewior wrote:
> On 2024-11-19 20:36:56 [+0100], Andrey Konovalov wrote:
> > > diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> > > index 6310a180278b6..b18b5944997f8 100644
> > > --- a/mm/kasan/generic.c
> > > +++ b/mm/kasan/generic.c
> > > @@ -521,7 +521,7 @@ size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
> > >                         sizeof(struct kasan_free_meta) : 0);
> > >  }
> > >
> > > -static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
> > 
> > Could you add a comment here that notes the usage, something like:
> > 
> > "This function avoids dynamic memory allocations and thus can be
> > called from contexts that do not allow allocating memory."
> > 
> > > +void kasan_record_aux_stack(void *addr)
> > >  {
> …
> Added, but I would prefer to add a pointer to stack_depot_save_flags(),
> which has this Context: paragraph. Would that work?
> Now looking at it, it says:
> |  * Context: Any context, but setting STACK_DEPOT_FLAG_CAN_ALLOC is required if
> |  *          alloc_pages() cannot be used from the current context. Currently
> |  *          this is the case for contexts where neither %GFP_ATOMIC nor
> |  *          %GFP_NOWAIT can be used (NMI, raw_spin_lock).
> 
> If I understand this correctly, then STACK_DEPOT_FLAG_CAN_ALLOC must not
> be specified if invoked from NMI. This will stop
> stack_depot_save_flags() from allocating memory, but the function will
> still acquire pool_lock, right?
> Do we need to update the comment saying that it must not be used from
> NMI or do we make it jump over the locked section in the NMI case?

Good point. It was meant to also be usable from NMI, because it's very
likely to succeed, and should just take the lock-less fast path once the
stack is in the depot.

But I think we need a fix like this for initial saving of a stack trace:


diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 5ed34cc963fc..245d5b416699 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -630,7 +630,15 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
 			prealloc = page_address(page);
 	}
 
-	raw_spin_lock_irqsave(&pool_lock, flags);
+	if (in_nmi()) {
+		/* We can never allocate in NMI context. */
+		WARN_ON_ONCE(can_alloc);
+		/* Best effort; bail if we fail to take the lock. */
+		if (!raw_spin_trylock_irqsave(&pool_lock, flags))
+			goto exit;
+	} else {
+		raw_spin_lock_irqsave(&pool_lock, flags);
+	}
 	printk_deferred_enter();
 
 	/* Try to find again, to avoid concurrently inserting duplicates. */


If that looks reasonable, I'll turn it into a patch.
Sebastian Andrzej Siewior Nov. 22, 2024, 3:29 p.m. UTC | #6
On 2024-11-22 16:01:29 [+0100], Marco Elver wrote:
> > Do we need to update the comment saying that it must not be used from
> > NMI or do we make it jump over the locked section in the NMI case?
> 
> Good point. It was meant to also be usable from NMI, because it's very
> likely to succeed, and should just take the lock-less fast path once the
> stack is in the depot.
> 
> But I think we need a fix like this for initial saving of a stack trace:
> 
> 
> diff --git a/lib/stackdepot.c b/lib/stackdepot.c
> index 5ed34cc963fc..245d5b416699 100644
> --- a/lib/stackdepot.c
> +++ b/lib/stackdepot.c
> @@ -630,7 +630,15 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
>  			prealloc = page_address(page);
>  	}
>  
> -	raw_spin_lock_irqsave(&pool_lock, flags);
> +	if (in_nmi()) {
> +		/* We can never allocate in NMI context. */
> +		WARN_ON_ONCE(can_alloc);
> +		/* Best effort; bail if we fail to take the lock. */
> +		if (!raw_spin_trylock_irqsave(&pool_lock, flags))
> +			goto exit;
> +	} else {
> +		raw_spin_lock_irqsave(&pool_lock, flags);
> +	}
>  	printk_deferred_enter();
>  
>  	/* Try to find again, to avoid concurrently inserting duplicates. */
> 
> 
> If that looks reasonable, I'll turn it into a patch.

Yes, looks reasonable.

Sebastian

Patch

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 00a3bf7c0d8f0..1a623818e8b39 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -488,7 +488,6 @@  void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 void kasan_cache_shrink(struct kmem_cache *cache);
 void kasan_cache_shutdown(struct kmem_cache *cache);
 void kasan_record_aux_stack(void *ptr);
-void kasan_record_aux_stack_noalloc(void *ptr);
 
 #else /* CONFIG_KASAN_GENERIC */
 
@@ -506,7 +505,6 @@  static inline void kasan_cache_create(struct kmem_cache *cache,
 static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
 static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
 static inline void kasan_record_aux_stack(void *ptr) {}
-static inline void kasan_record_aux_stack_noalloc(void *ptr) {}
 
 #endif /* CONFIG_KASAN_GENERIC */
 
diff --git a/include/linux/task_work.h b/include/linux/task_work.h
index 2964171856e00..0646804860ff1 100644
--- a/include/linux/task_work.h
+++ b/include/linux/task_work.h
@@ -19,9 +19,6 @@  enum task_work_notify_mode {
 	TWA_SIGNAL,
 	TWA_SIGNAL_NO_IPI,
 	TWA_NMI_CURRENT,
-
-	TWA_FLAGS = 0xff00,
-	TWAF_NO_ALLOC = 0x0100,
 };
 
 static inline bool task_work_pending(struct task_struct *task)
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 2f4fb336dda17..73f7e1fd4ab4d 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -147,7 +147,7 @@  bool irq_work_queue_on(struct irq_work *work, int cpu)
 	if (!irq_work_claim(work))
 		return false;
 
-	kasan_record_aux_stack_noalloc(work);
+	kasan_record_aux_stack(work);
 
 	preempt_disable();
 	if (cpu != smp_processor_id()) {
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index b3b3ce34df631..4b3f319114650 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -250,7 +250,7 @@  EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
 void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 {
 	if (head)
-		kasan_record_aux_stack_noalloc(ptr);
+		kasan_record_aux_stack(ptr);
 
 	__kvfree_call_rcu(head, ptr);
 }
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index b1f883fcd9185..7eae9bd818a90 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3083,7 +3083,7 @@  __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
 	}
 	head->func = func;
 	head->next = NULL;
-	kasan_record_aux_stack_noalloc(head);
+	kasan_record_aux_stack(head);
 	local_irq_save(flags);
 	rdp = this_cpu_ptr(&rcu_data);
 	lazy = lazy_in && !rcu_async_should_hurry();
@@ -3807,7 +3807,7 @@  void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 		return;
 	}
 
-	kasan_record_aux_stack_noalloc(ptr);
+	kasan_record_aux_stack(ptr);
 	success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
 	if (!success) {
 		run_page_cache_worker(krcp);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a1c353a62c568..3717360a940d2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10485,7 +10485,7 @@  void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
 		return;
 
 	/* No page allocation under rq lock */
-	task_work_add(curr, work, TWA_RESUME | TWAF_NO_ALLOC);
+	task_work_add(curr, work, TWA_RESUME);
 }
 
 void sched_mm_cid_exit_signals(struct task_struct *t)
diff --git a/kernel/task_work.c b/kernel/task_work.c
index c969f1f26be58..d1efec571a4a4 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -55,26 +55,14 @@  int task_work_add(struct task_struct *task, struct callback_head *work,
 		  enum task_work_notify_mode notify)
 {
 	struct callback_head *head;
-	int flags = notify & TWA_FLAGS;
 
-	notify &= ~TWA_FLAGS;
 	if (notify == TWA_NMI_CURRENT) {
 		if (WARN_ON_ONCE(task != current))
 			return -EINVAL;
 		if (!IS_ENABLED(CONFIG_IRQ_WORK))
 			return -EINVAL;
 	} else {
-		/*
-		 * Record the work call stack in order to print it in KASAN
-		 * reports.
-		 *
-		 * Note that stack allocation can fail if TWAF_NO_ALLOC flag
-		 * is set and new page is needed to expand the stack buffer.
-		 */
-		if (flags & TWAF_NO_ALLOC)
-			kasan_record_aux_stack_noalloc(work);
-		else
-			kasan_record_aux_stack(work);
+		kasan_record_aux_stack(work);
 	}
 
 	head = READ_ONCE(task->task_works);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9949ffad8df09..65b8314b2d538 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2180,7 +2180,7 @@  static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
 	debug_work_activate(work);
 
 	/* record the work call stack in order to print it in KASAN reports */
-	kasan_record_aux_stack_noalloc(work);
+	kasan_record_aux_stack(work);
 
 	/* we own @work, set data and link */
 	set_work_pwq(work, pwq, extra_flags);
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 6310a180278b6..b18b5944997f8 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -521,7 +521,7 @@  size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
 			sizeof(struct kasan_free_meta) : 0);
 }
 
-static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
+void kasan_record_aux_stack(void *addr)
 {
 	struct slab *slab = kasan_addr_to_slab(addr);
 	struct kmem_cache *cache;
@@ -538,17 +538,7 @@  static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
 		return;
 
 	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
-	alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
-}
-
-void kasan_record_aux_stack(void *addr)
-{
-	return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC);
-}
-
-void kasan_record_aux_stack_noalloc(void *addr)
-{
-	return __kasan_record_aux_stack(addr, 0);
+	alloc_meta->aux_stack[0] = kasan_save_stack(0, 0);
 }
 
 void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
diff --git a/mm/slub.c b/mm/slub.c
index 5b832512044e3..b8c4bf3fe0d07 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2300,7 +2300,7 @@  bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
 			 * We have to do this manually because the rcu_head is
 			 * not located inside the object.
 			 */
-			kasan_record_aux_stack_noalloc(x);
+			kasan_record_aux_stack(x);
 
 			delayed_free->object = x;
 			call_rcu(&delayed_free->head, slab_free_after_rcu_debug);