
[v2] mm,page_owner: Fix recursion

Message ID 20240315222610.6870-1-osalvador@suse.de
State New
Series [v2] mm,page_owner: Fix recursion

Commit Message

Oscar Salvador March 15, 2024, 10:26 p.m. UTC
Prior to 217b2119b9e2 ("mm,page_owner: implement the tracking of the stacks count"),
the only place where page_owner could potentially go into recursion due to
its need to allocate more memory was save_stack(), which ends up calling
into stackdepot code that may itself allocate memory.

We guarded against that by signaling that the current task was already in
page_owner code, so that if a recursion attempt was made we could catch it
and return dummy_handle.
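
The guard itself is just a per-task re-entrancy flag. A minimal, compilable
userspace sketch of the pattern (illustrative only: the names loosely mirror
mm/page_owner.c, and allocate_metadata() is a hypothetical stand-in for the
allocating work):

#include <stdio.h>

static _Thread_local int in_page_owner;	/* stand-in for current->in_page_owner */
static const unsigned long dummy_handle = 1;

static unsigned long allocate_metadata(void);

static unsigned long save_stack_sketch(void)
{
	unsigned long handle;

	if (in_page_owner)		/* already tracking: recursive attempt */
		return dummy_handle;	/* bail out instead of recursing forever */

	in_page_owner = 1;		/* set_current_in_page_owner() */
	handle = allocate_metadata();	/* may allocate and re-enter this path */
	in_page_owner = 0;		/* unset_current_in_page_owner() */

	return handle;
}

/* Hypothetical stand-in for the allocating work (stack depot, kmalloc), which
 * in the kernel can trigger a page allocation and re-enter the tracking path. */
static unsigned long allocate_metadata(void)
{
	return save_stack_sketch() + 41;
}

int main(void)
{
	/* Prints "handle = 42": the nested call returned dummy_handle. */
	printf("handle = %lu\n", save_stack_sketch());
	return 0;
}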

The above commit introduced a new place in page_owner code where we can
allocate memory, meaning we could go into recursion should we take that
path.
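
The recursion being guarded against looks roughly like this (an approximate
call chain, not exact kernel call sites):

	__set_page_owner()
	  add_stack_record_to_list()	/* new allocation site after 217b2119b9e2 */
	    kmalloc()
	      page allocation
	        __set_page_owner()	/* recursion */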

Make sure to signal that we are in page_owner in that codepath as well.
Move the guard code into two helpers, {un}set_current_in_page_owner(),
and use them around the calls that might allocate memory in the two
affected functions.

Signed-off-by: Oscar Salvador <osalvador@suse.de>
Fixes: 217b2119b9e2 ("mm,page_owner: implement the tracking of the stacks count")
---
Changes from v1 -> v2:
 Wrap {un}set_current_in_page_owner directly around kmalloc call
 (Vlastimil feedback)
---
 mm/page_owner.c | 33 +++++++++++++++++++++++----------
 1 file changed, 23 insertions(+), 10 deletions(-)

Comments

Vlastimil Babka March 18, 2024, 8:48 a.m. UTC | #1
On 3/15/24 23:26, Oscar Salvador wrote:
> Prior to 217b2119b9e2 ("mm,page_owner: implement the tracking of the stacks count"),
> the only place where page_owner could potentially go into recursion due to
> its need to allocate more memory was save_stack(), which ends up calling
> into stackdepot code that may itself allocate memory.
> 
> We guarded against that by signaling that the current task was already in
> page_owner code, so that if a recursion attempt was made we could catch it
> and return dummy_handle.
> 
> The above commit introduced a new place in page_owner code where we can
> allocate memory, meaning we could go into recursion should we take that
> path.
> 
> Make sure to signal that we are in page_owner in that codepath as well.
> Move the guard code into two helpers, {un}set_current_in_page_owner(),
> and use them around the calls that might allocate memory in the two
> affected functions.
> 
> Signed-off-by: Oscar Salvador <osalvador@suse.de>
> Fixes: 217b2119b9e2 ("mm,page_owner: implement the tracking of the stacks count")

Reviewed-by: Vlastimil Babka <vbabka@suse.cz>

> ---
> Changes from v1 -> v2:
>  Wrap {un}set_current_in_page_owner directly around kmalloc call
>  (Vlastimil feedback)
> ---
>  mm/page_owner.c | 33 +++++++++++++++++++++++----------
>  1 file changed, 23 insertions(+), 10 deletions(-)
> 
> diff --git a/mm/page_owner.c b/mm/page_owner.c
> index e96dd9092658..cde1ee0f9005 100644
> --- a/mm/page_owner.c
> +++ b/mm/page_owner.c
> @@ -54,6 +54,22 @@ static depot_stack_handle_t early_handle;
>  
>  static void init_early_allocated_pages(void);
>  
> +static inline void set_current_in_page_owner(void)
> +{
> +	/*
> +	 * Avoid recursion.
> +	 *
> +	 * We might need to allocate more memory from page_owner code, so make
> +	 * sure to signal it in order to avoid recursion.
> +	 */
> +	current->in_page_owner = 1;
> +}
> +
> +static inline void unset_current_in_page_owner(void)
> +{
> +	current->in_page_owner = 0;
> +}
> +
>  static int __init early_page_owner_param(char *buf)
>  {
>  	int ret = kstrtobool(buf, &page_owner_enabled);
> @@ -133,23 +149,16 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
>  	depot_stack_handle_t handle;
>  	unsigned int nr_entries;
>  
> -	/*
> -	 * Avoid recursion.
> -	 *
> -	 * Sometimes page metadata allocation tracking requires more
> -	 * memory to be allocated:
> -	 * - when new stack trace is saved to stack depot
> -	 */
>  	if (current->in_page_owner)
>  		return dummy_handle;
> -	current->in_page_owner = 1;
>  
> +	set_current_in_page_owner();
>  	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
>  	handle = stack_depot_save(entries, nr_entries, flags);
>  	if (!handle)
>  		handle = failure_handle;
> +	unset_current_in_page_owner();
>  
> -	current->in_page_owner = 0;
>  	return handle;
>  }
>  
> @@ -164,9 +173,13 @@ static void add_stack_record_to_list(struct stack_record *stack_record,
>  	gfp_mask &= (GFP_ATOMIC | GFP_KERNEL);
>  	gfp_mask |= __GFP_NOWARN;
>  
> +	set_current_in_page_owner();
>  	stack = kmalloc(sizeof(*stack), gfp_mask);
> -	if (!stack)
> +	if (!stack) {
> +		unset_current_in_page_owner();
>  		return;
> +	}
> +	unset_current_in_page_owner();
>  
>  	stack->stack_record = stack_record;
>  	stack->next = NULL;

Patch

diff --git a/mm/page_owner.c b/mm/page_owner.c
index e96dd9092658..cde1ee0f9005 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -54,6 +54,22 @@  static depot_stack_handle_t early_handle;
 
 static void init_early_allocated_pages(void);
 
+static inline void set_current_in_page_owner(void)
+{
+	/*
+	 * Avoid recursion.
+	 *
+	 * We might need to allocate more memory from page_owner code, so make
+	 * sure to signal it in order to avoid recursion.
+	 */
+	current->in_page_owner = 1;
+}
+
+static inline void unset_current_in_page_owner(void)
+{
+	current->in_page_owner = 0;
+}
+
 static int __init early_page_owner_param(char *buf)
 {
 	int ret = kstrtobool(buf, &page_owner_enabled);
@@ -133,23 +149,16 @@  static noinline depot_stack_handle_t save_stack(gfp_t flags)
 	depot_stack_handle_t handle;
 	unsigned int nr_entries;
 
-	/*
-	 * Avoid recursion.
-	 *
-	 * Sometimes page metadata allocation tracking requires more
-	 * memory to be allocated:
-	 * - when new stack trace is saved to stack depot
-	 */
 	if (current->in_page_owner)
 		return dummy_handle;
-	current->in_page_owner = 1;
 
+	set_current_in_page_owner();
 	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
 	handle = stack_depot_save(entries, nr_entries, flags);
 	if (!handle)
 		handle = failure_handle;
+	unset_current_in_page_owner();
 
-	current->in_page_owner = 0;
 	return handle;
 }
 
@@ -164,9 +173,13 @@  static void add_stack_record_to_list(struct stack_record *stack_record,
 	gfp_mask &= (GFP_ATOMIC | GFP_KERNEL);
 	gfp_mask |= __GFP_NOWARN;
 
+	set_current_in_page_owner();
 	stack = kmalloc(sizeof(*stack), gfp_mask);
-	if (!stack)
+	if (!stack) {
+		unset_current_in_page_owner();
 		return;
+	}
+	unset_current_in_page_owner();
 
 	stack->stack_record = stack_record;
 	stack->next = NULL;