
[RFC,01/37] mm: page_alloc: Rename gfp_to_alloc_flags_cma -> gfp_to_alloc_flags_fast

Message ID 20230823131350.114942-2-alexandru.elisei@arm.com (mailing list archive)
State New, archived

Commit Message

Alexandru Elisei Aug. 23, 2023, 1:13 p.m. UTC
gfp_to_alloc_flags_cma() is called on the fast path of the page allocator
and all it does is set the ALLOC_CMA flag if all the conditions are met for
the allocation to be satisfied from the MIGRATE_CMA list. Rename it to be
more generic, as it will soon have to handle another flag.
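
For context, the helper being renamed currently amounts to the following (a
minimal sketch of mm/page_alloc.c as of this series; the exact body may differ
slightly):

	/* Must be called after current_gfp_context() which can change gfp_mask */
	static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
							  unsigned int alloc_flags)
	{
	#ifdef CONFIG_CMA
		/* Movable allocations may also be satisfied from MIGRATE_CMA pageblocks */
		if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
			alloc_flags |= ALLOC_CMA;
	#endif
		return alloc_flags;
	}

The rename does not change this behaviour; it only makes room for an
additional flag to be set here by later patches in the series.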

Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
---
 mm/page_alloc.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

Patch

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7d3460c7a480..e6f950c54494 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3081,7 +3081,7 @@  alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
 }
 
 /* Must be called after current_gfp_context() which can change gfp_mask */
-static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
+static inline unsigned int gfp_to_alloc_flags_fast(gfp_t gfp_mask,
 						  unsigned int alloc_flags)
 {
 #ifdef CONFIG_CMA
@@ -3784,7 +3784,7 @@  gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
 	} else if (unlikely(rt_task(current)) && in_task())
 		alloc_flags |= ALLOC_MIN_RESERVE;
 
-	alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
+	alloc_flags = gfp_to_alloc_flags_fast(gfp_mask, alloc_flags);
 
 	return alloc_flags;
 }
@@ -4074,7 +4074,7 @@  __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 
 	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
 	if (reserve_flags)
-		alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) |
+		alloc_flags = gfp_to_alloc_flags_fast(gfp_mask, reserve_flags) |
 					  (alloc_flags & ALLOC_KSWAPD);
 
 	/*
@@ -4250,7 +4250,7 @@  static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 	if (should_fail_alloc_page(gfp_mask, order))
 		return false;
 
-	*alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
+	*alloc_flags = gfp_to_alloc_flags_fast(gfp_mask, *alloc_flags);
 
 	/* Dirty zone balancing only done in the fast path */
 	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);