[v2,1/2] mm: fix missing handler for __GFP_NOWARN

Message ID 20220511061951.1114-1-zhengqi.arch@bytedance.com (mailing list archive)
State New
Series [v2,1/2] mm: fix missing handler for __GFP_NOWARN

Commit Message

Qi Zheng May 11, 2022, 6:19 a.m. UTC
We expect no warnings to be issued when __GFP_NOWARN is specified, but
paths such as alloc_pages() and kmalloc() currently still print some
warnings. Fix that.

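For example (an illustrative sketch, not part of the patch), a caller
passing __GFP_NOWARN expects a failed allocation to stay silent:

	/* Illustrative only: no allocation-failure splat is expected. */
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_NOWARN, 2);

	if (!page)
		return -ENOMEM;
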
Warnings that report usage problems are deliberately left untouched:
if one of them fires, the caller's usage should be fixed instead. One
such case is the following check:

	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));

Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
---
 include/linux/fault-inject.h |  2 ++
 lib/fault-inject.c           |  3 +++
 mm/failslab.c                |  3 +++
 mm/internal.h                | 15 +++++++++++++++
 mm/page_alloc.c              | 18 ++++++++++--------
 5 files changed, 33 insertions(+), 8 deletions(-)

Comments

Qi Zheng May 11, 2022, 6:24 a.m. UTC | #1
On 2022/5/11 2:19 PM, Qi Zheng wrote:
> We expect no warnings to be issued when __GFP_NOWARN is specified, but
> paths such as alloc_pages() and kmalloc() currently still print some
> warnings. Fix that.
> 
> Warnings that report usage problems are deliberately left untouched:
> if one of them fires, the caller's usage should be fixed instead. One
> such case is the following check:
> 
> 	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
> 
> Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
> ---

Changelog v1 -> v2:
  - add comment to WARN_ON_ONCE_GFP
  - handle __alloc_contig_migrate_range() case
  - do not deal with:
	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
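
For reference, a minimal sketch of how the new WARN_ON_ONCE_GFP() helper
reads at a call site (mirroring the __alloc_pages() hunk in the patch
below; gfp is the caller's mask):

	/* Warns at most once, and never when __GFP_NOWARN is passed. */
	if (WARN_ON_ONCE_GFP(order >= MAX_ORDER, gfp))
		return NULL;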

Patch

diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 2d04f6448cde..9f6e25467844 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -20,6 +20,7 @@ struct fault_attr {
 	atomic_t space;
 	unsigned long verbose;
 	bool task_filter;
+	bool no_warn;
 	unsigned long stacktrace_depth;
 	unsigned long require_start;
 	unsigned long require_end;
@@ -39,6 +40,7 @@ struct fault_attr {
 		.ratelimit_state = RATELIMIT_STATE_INIT_DISABLED,	\
 		.verbose = 2,						\
 		.dname = NULL,						\
+		.no_warn = false,					\
 	}
 
 #define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index ce12621b4275..423784d9c058 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -41,6 +41,9 @@ EXPORT_SYMBOL_GPL(setup_fault_attr);
 
 static void fail_dump(struct fault_attr *attr)
 {
+	if (attr->no_warn)
+		return;
+
 	if (attr->verbose > 0 && __ratelimit(&attr->ratelimit_state)) {
 		printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
 		       "name %pd, interval %lu, probability %lu, "
diff --git a/mm/failslab.c b/mm/failslab.c
index f92fed91ac23..58df9789f1d2 100644
--- a/mm/failslab.c
+++ b/mm/failslab.c
@@ -30,6 +30,9 @@ bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags)
 	if (failslab.cache_filter && !(s->flags & SLAB_FAILSLAB))
 		return false;
 
+	if (gfpflags & __GFP_NOWARN)
+		failslab.attr.no_warn = true;
+
 	return should_fail(&failslab.attr, s->object_size);
 }
 
diff --git a/mm/internal.h b/mm/internal.h
index e3e50af20706..34fdedb9986f 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -35,6 +35,21 @@ struct folio_batch;
 /* Do not use these with a slab allocator */
 #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
 
+/*
+ * Like WARN_ON_ONCE(), except that no warning is issued
+ * when __GFP_NOWARN is set in the gfp mask.
+ */
+#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
+	static bool __section(".data.once") __warned;			\
+	int __ret_warn_once = !!(cond);					\
+									\
+	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
+		__warned = true;					\
+		WARN_ON(1);						\
+	}								\
+	unlikely(__ret_warn_once);					\
+})
+
 void page_writeback_init(void);
 
 static inline void *folio_raw_mapping(struct folio *folio)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 65f892af1d4f..f9f329403d76 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3789,6 +3789,9 @@ static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 			(gfp_mask & __GFP_DIRECT_RECLAIM))
 		return false;
 
+	if (gfp_mask & __GFP_NOWARN)
+		fail_page_alloc.attr.no_warn = true;
+
 	return should_fail(&fail_page_alloc.attr, 1 << order);
 }
 
@@ -4337,7 +4340,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 	 */
 
 	/* Exhausted what can be done so it's blame time */
-	if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
+	if (out_of_memory(&oc) ||
+	    WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
 		*did_some_progress = 1;
 
 		/*
@@ -5103,7 +5107,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		 * All existing users of the __GFP_NOFAIL are blockable, so warn
 		 * of any new users that actually require GFP_NOWAIT
 		 */
-		if (WARN_ON_ONCE(!can_direct_reclaim))
+		if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
 			goto fail;
 
 		/*
@@ -5111,7 +5115,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		 * because we cannot reclaim anything and only can loop waiting
 		 * for somebody to do a work for us
 		 */
-		WARN_ON_ONCE(current->flags & PF_MEMALLOC);
+		WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
 
 		/*
 		 * non failing costly orders are a hard requirement which we
@@ -5119,7 +5123,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		 * so that we can identify them and convert them to something
 		 * else.
 		 */
-		WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
+		WARN_ON_ONCE_GFP(order > PAGE_ALLOC_COSTLY_ORDER, gfp_mask);
 
 		/*
 		 * Help non-failing allocations by giving them access to memory
@@ -5365,10 +5369,8 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 	 * There are several places where we assume that the order value is sane
 	 * so bail out early if the request is out of bound.
 	 */
-	if (unlikely(order >= MAX_ORDER)) {
-		WARN_ON_ONCE(!(gfp & __GFP_NOWARN));
+	if (WARN_ON_ONCE_GFP(order >= MAX_ORDER, gfp))
 		return NULL;
-	}
 
 	gfp &= gfp_allowed_mask;
 	/*
@@ -9020,7 +9022,7 @@ int __alloc_contig_migrate_range(struct compact_control *cc,
 
 	lru_cache_enable();
 	if (ret < 0) {
-		if (ret == -EBUSY)
+		if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
 			alloc_contig_dump_pages(&cc->migratepages);
 		putback_movable_pages(&cc->migratepages);
 		return ret;
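
With this applied, a failslab- or fail_page_alloc-injected failure of a
__GFP_NOWARN request no longer prints the "FAULT_INJECTION: forcing a
failure" report, since fail_dump() returns early when no_warn is set. A
minimal illustrative sketch:

	/* Under fault injection, this failure now stays silent. */
	void *obj = kmalloc(64, GFP_KERNEL | __GFP_NOWARN);

	if (!obj)
		return -ENOMEM;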