| Message ID | 20220401181109.1477354-1-zi.yan@sent.com (mailing list archive) |
| --- | --- |
| State | New |
| Series | [v2,1/2] mm: page_alloc: simplify pageblock migratetype check in __free_one_page(). |
On 4/1/22 20:11, Zi Yan wrote:
> From: Zi Yan <ziy@nvidia.com>
>
> Move pageblock migratetype check code in the while loop to simplify the
> logic. It also saves redundant buddy page checking code.
>
> Suggested-by: Vlastimil Babka <vbabka@suse.cz>
> Link: https://lore.kernel.org/linux-mm/27ff69f9-60c5-9e59-feb2-295250077551@suse.cz/
> Signed-off-by: Zi Yan <ziy@nvidia.com>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

> ---
>  mm/page_alloc.c | 46 +++++++++++++++++-----------------------------
>  1 file changed, 17 insertions(+), 29 deletions(-)
>
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 856473e54155..2ea106146686 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -1054,7 +1054,6 @@ static inline void __free_one_page(struct page *page,
>  		int migratetype, fpi_t fpi_flags)
>  {
>  	struct capture_control *capc = task_capc(zone);
> -	unsigned int max_order = pageblock_order;
>  	unsigned long buddy_pfn;
>  	unsigned long combined_pfn;
>  	struct page *buddy;
> @@ -1070,8 +1069,7 @@ static inline void __free_one_page(struct page *page,
>  	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
>  	VM_BUG_ON_PAGE(bad_range(zone, page), page);
>
> -continue_merging:
> -	while (order < max_order) {
> +	while (order < MAX_ORDER - 1) {
>  		if (compaction_capture(capc, page, order, migratetype)) {
>  			__mod_zone_freepage_state(zone, -(1 << order),
>  						migratetype);
> @@ -1082,6 +1080,22 @@ static inline void __free_one_page(struct page *page,
>
>  		if (!page_is_buddy(page, buddy, order))
>  			goto done_merging;
> +
> +		if (unlikely(order >= pageblock_order)) {
> +			/*
> +			 * We want to prevent merge between freepages on pageblock
> +			 * without fallbacks and normal pageblock. Without this,
> +			 * pageblock isolation could cause incorrect freepage or CMA
> +			 * accounting or HIGHATOMIC accounting.
> +			 */
> +			int buddy_mt = get_pageblock_migratetype(buddy);
> +
> +			if (migratetype != buddy_mt
> +					&& (!migratetype_is_mergeable(migratetype) ||
> +						!migratetype_is_mergeable(buddy_mt)))
> +				goto done_merging;
> +		}
> +
>  		/*
>  		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
>  		 * merge with it and move up one order.
> @@ -1095,32 +1109,6 @@ static inline void __free_one_page(struct page *page,
>  		pfn = combined_pfn;
>  		order++;
>  	}
> -	if (order < MAX_ORDER - 1) {
> -		/* If we are here, it means order is >= pageblock_order.
> -		 * We want to prevent merge between freepages on pageblock
> -		 * without fallbacks and normal pageblock. Without this,
> -		 * pageblock isolation could cause incorrect freepage or CMA
> -		 * accounting or HIGHATOMIC accounting.
> -		 *
> -		 * We don't want to hit this code for the more frequent
> -		 * low-order merging.
> -		 */
> -		int buddy_mt;
> -
> -		buddy_pfn = __find_buddy_pfn(pfn, order);
> -		buddy = page + (buddy_pfn - pfn);
> -
> -		if (!page_is_buddy(page, buddy, order))
> -			goto done_merging;
> -		buddy_mt = get_pageblock_migratetype(buddy);
> -
> -		if (migratetype != buddy_mt
> -				&& (!migratetype_is_mergeable(migratetype) ||
> -					!migratetype_is_mergeable(buddy_mt)))
> -			goto done_merging;
> -		max_order = order + 1;
> -		goto continue_merging;
> -	}
>
>  done_merging:
>  	set_buddy_order(page, order);
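For readers following the thread, the condition the patch moves inside the merging loop can be modelled as a small userspace sketch. This is illustrative only, not kernel code: the enum values, the `migratetype_is_mergeable()` body, and the `may_merge()` helper below are assumptions that mirror the intent described in the patch (only pageblock types with fallbacks may merge across pageblock boundaries; CMA, HIGHATOMIC and isolated pageblocks may not), not the kernel's exact definitions.

```c
/*
 * Userspace model of the pageblock merge-eligibility check that the patch
 * evaluates inside the loop once order >= pageblock_order. Illustrative
 * sketch; enum values and helpers only approximate the kernel's definitions.
 */
#include <stdbool.h>
#include <stdio.h>

enum migratetype {
	MT_UNMOVABLE,
	MT_MOVABLE,
	MT_RECLAIMABLE,
	MT_PCPTYPES,			/* assumed: the types above have fallbacks */
	MT_HIGHATOMIC = MT_PCPTYPES,
	MT_CMA,
	MT_ISOLATE,
};

/* Assumed behaviour: only pageblock types with fallbacks are mergeable. */
static bool migratetype_is_mergeable(int mt)
{
	return mt < MT_PCPTYPES;
}

/*
 * Hypothetical helper standing in for the in-loop condition: below
 * pageblock_order both buddies share one pageblock, so no cross-block
 * accounting can go wrong; at or above it, incompatible pageblock
 * migratetypes stop the merge.
 */
static bool may_merge(int order, int pageblock_order, int mt, int buddy_mt)
{
	if (order < pageblock_order)
		return true;
	if (mt == buddy_mt)
		return true;
	return migratetype_is_mergeable(mt) && migratetype_is_mergeable(buddy_mt);
}

int main(void)
{
	const int pageblock_order = 9;	/* typical value for 2MB pageblocks */

	printf("order 3, MOVABLE vs CMA:         %s\n",
	       may_merge(3, pageblock_order, MT_MOVABLE, MT_CMA) ? "merge" : "stop");
	printf("order 9, MOVABLE vs CMA:         %s\n",
	       may_merge(9, pageblock_order, MT_MOVABLE, MT_CMA) ? "merge" : "stop");
	printf("order 9, MOVABLE vs RECLAIMABLE: %s\n",
	       may_merge(9, pageblock_order, MT_MOVABLE, MT_RECLAIMABLE) ? "merge" : "stop");
	return 0;
}
```

With pageblock_order set to 9, the sketch refuses a MOVABLE/CMA merge at order 9 but allows MOVABLE/RECLAIMABLE, which matches the rule the patch now checks inside the loop, guarded by `unlikely(order >= pageblock_order)` so the frequent low-order merges skip it.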