Message ID | 20200928182110.7050-4-david@redhat.com (mailing list archive) |
---|---|
State | New, archived |
Series | mm: place pages to the freelist tail when onlining and undoing isolation |
> Page isolation doesn't actually touch the pages, it simply isolates > pageblocks and moves all free pages to the MIGRATE_ISOLATE freelist. > > We already place pages to the tail of the freelists when undoing > isolation via __putback_isolated_page(), let's do it in any case > (e.g., if order <= pageblock_order) and document the behavior. > > Add a "to_tail" parameter to move_freepages_block() but introduce a > a new move_to_free_list_tail() - similar to add_to_free_list_tail(). > > This change results in all pages getting onlined via online_pages() to > be placed to the tail of the freelist. > > Reviewed-by: Oscar Salvador <osalvador@suse.de> > Cc: Andrew Morton <akpm@linux-foundation.org> > Cc: Alexander Duyck <alexander.h.duyck@linux.intel.com> > Cc: Mel Gorman <mgorman@techsingularity.net> > Cc: Michal Hocko <mhocko@kernel.org> > Cc: Dave Hansen <dave.hansen@intel.com> > Cc: Vlastimil Babka <vbabka@suse.cz> > Cc: Wei Yang <richard.weiyang@linux.alibaba.com> > Cc: Oscar Salvador <osalvador@suse.de> > Cc: Mike Rapoport <rppt@kernel.org> > Cc: Scott Cheloha <cheloha@linux.ibm.com> > Cc: Michael Ellerman <mpe@ellerman.id.au> > Signed-off-by: David Hildenbrand <david@redhat.com> > --- > include/linux/page-isolation.h | 4 ++-- > mm/page_alloc.c | 35 +++++++++++++++++++++++----------- > mm/page_isolation.c | 12 +++++++++--- > 3 files changed, 35 insertions(+), 16 deletions(-) > > diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h > index 572458016331..3eca9b3c5305 100644 > --- a/include/linux/page-isolation.h > +++ b/include/linux/page-isolation.h > @@ -36,8 +36,8 @@ static inline bool is_migrate_isolate(int migratetype) > struct page *has_unmovable_pages(struct zone *zone, struct page *page, > int migratetype, int flags); > void set_pageblock_migratetype(struct page *page, int migratetype); > -int move_freepages_block(struct zone *zone, struct page *page, > - int migratetype, int *num_movable); > +int move_freepages_block(struct zone *zone, struct page *page, int migratetype, > + bool to_tail, int *num_movable); > > /* > * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE. > diff --git a/mm/page_alloc.c b/mm/page_alloc.c > index 9e3ed4a6f69a..d5a5f528b8ca 100644 > --- a/mm/page_alloc.c > +++ b/mm/page_alloc.c > @@ -905,6 +905,15 @@ static inline void move_to_free_list(struct page *page, struct zone *zone, > list_move(&page->lru, &area->free_list[migratetype]); > } > > +/* Used for pages which are on another list */ > +static inline void move_to_free_list_tail(struct page *page, struct zone *zone, > + unsigned int order, int migratetype) > +{ > + struct free_area *area = &zone->free_area[order]; > + > + list_move_tail(&page->lru, &area->free_list[migratetype]); > +} > + > static inline void del_page_from_free_list(struct page *page, struct zone *zone, > unsigned int order) > { > @@ -2338,9 +2347,9 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone, > * Note that start_page and end_pages are not aligned on a pageblock > * boundary. 
If alignment is required, use move_freepages_block() > */ > -static int move_freepages(struct zone *zone, > - struct page *start_page, struct page *end_page, > - int migratetype, int *num_movable) > +static int move_freepages(struct zone *zone, struct page *start_page, > + struct page *end_page, int migratetype, > + bool to_tail, int *num_movable) > { > struct page *page; > unsigned int order; > @@ -2371,7 +2380,10 @@ static int move_freepages(struct zone *zone, > VM_BUG_ON_PAGE(page_zone(page) != zone, page); > > order = page_order(page); > - move_to_free_list(page, zone, order, migratetype); > + if (to_tail) > + move_to_free_list_tail(page, zone, order, migratetype); > + else > + move_to_free_list(page, zone, order, migratetype); > page += 1 << order; > pages_moved += 1 << order; > } > @@ -2379,8 +2391,8 @@ static int move_freepages(struct zone *zone, > return pages_moved; > } > > -int move_freepages_block(struct zone *zone, struct page *page, > - int migratetype, int *num_movable) > +int move_freepages_block(struct zone *zone, struct page *page, int migratetype, > + bool to_tail, int *num_movable) > { > unsigned long start_pfn, end_pfn; > struct page *start_page, *end_page; > @@ -2401,7 +2413,7 @@ int move_freepages_block(struct zone *zone, struct page *page, > return 0; > > return move_freepages(zone, start_page, end_page, migratetype, > - num_movable); > + to_tail, num_movable); > } > > static void change_pageblock_range(struct page *pageblock_page, > @@ -2526,8 +2538,8 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page, > if (!whole_block) > goto single_page; > > - free_pages = move_freepages_block(zone, page, start_type, > - &movable_pages); > + free_pages = move_freepages_block(zone, page, start_type, false, > + &movable_pages); > /* > * Determine how many pages are compatible with our allocation. > * For movable allocation, it's the number of movable pages which > @@ -2635,7 +2647,8 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, > && !is_migrate_cma(mt)) { > zone->nr_reserved_highatomic += pageblock_nr_pages; > set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); > - move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); > + move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, false, > + NULL); > } > > out_unlock: > @@ -2711,7 +2724,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, > */ > set_pageblock_migratetype(page, ac->migratetype); > ret = move_freepages_block(zone, page, ac->migratetype, > - NULL); > + false, NULL); > if (ret) { > spin_unlock_irqrestore(&zone->lock, flags); > return ret; > diff --git a/mm/page_isolation.c b/mm/page_isolation.c > index abfe26ad59fd..de44e1329706 100644 > --- a/mm/page_isolation.c > +++ b/mm/page_isolation.c > @@ -45,7 +45,7 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_ > set_pageblock_migratetype(page, MIGRATE_ISOLATE); > zone->nr_isolate_pageblock++; > nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE, > - NULL); > + false, NULL); > > __mod_zone_freepage_state(zone, -nr_pages, mt); > spin_unlock_irqrestore(&zone->lock, flags); > @@ -83,7 +83,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype) > * Because freepage with more than pageblock_order on isolated > * pageblock is restricted to merge due to freepage counting problem, > * it is possible that there is free buddy page. 
> - * move_freepages_block() doesn't care of merge so we need other > + * move_freepages_block() don't care about merging, so we need another > * approach in order to merge them. Isolation and free will make > * these pages to be merged. > */ > @@ -106,9 +106,15 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype) > * If we isolate freepage with more than pageblock_order, there > * should be no freepage in the range, so we could avoid costly > * pageblock scanning for freepage moving. > + * > + * We didn't actually touch any of the isolated pages, so place them > + * to the tail of the freelist. This is an optimization for memory > + * onlining - just onlined memory won't immediately be considered for > + * allocation. > */ > if (!isolated_page) { > - nr_pages = move_freepages_block(zone, page, migratetype, NULL); > + nr_pages = move_freepages_block(zone, page, migratetype, true, > + NULL); > __mod_zone_freepage_state(zone, nr_pages, migratetype); > } > set_pageblock_migratetype(page, migratetype); Acked-by: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
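The practical effect of tail placement is easiest to see with a small model of a freelist: pages placed at the head are handed out first, while pages placed at the tail are only used once everything in front of them is gone. The sketch below is a self-contained userspace illustration of that ordering, not kernel code; the real freelists are per-zone, per-order, per-migratetype list_heads.

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy freelist: allocation always takes from the head. */
struct node { int pfn; struct node *next; };
static struct node *freelist;

static void place(struct node *n, bool to_tail)
{
	n->next = NULL;
	if (!to_tail || !freelist) {
		/* head placement: this page is reused first */
		n->next = freelist;
		freelist = n;
		return;
	}
	/* tail placement: this page is reused only after all others */
	struct node *it = freelist;
	while (it->next)
		it = it->next;
	it->next = n;
}

static struct node *take(void)
{
	struct node *n = freelist;
	if (n)
		freelist = n->next;
	return n;
}

int main(void)
{
	struct node a = { .pfn = 1 }, b = { .pfn = 2 }, c = { .pfn = 3 };

	place(&a, false);
	place(&b, false);	/* head placement, like move_to_free_list() */
	place(&c, true);	/* tail placement, like move_to_free_list_tail() */

	while (freelist)
		printf("allocating pfn %d\n", take()->pfn);	/* 2, 1, 3 */
	return 0;
}
```

With this patch, the un-isolation performed when onlining memory (undo_isolate_page_range() calling unset_migratetype_isolate() per pageblock) takes the to_tail = true path, so just-onlined pages behave like `c` above and are only considered for allocation after the older free pages.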
On Mon, Sep 28, 2020 at 08:21:08PM +0200, David Hildenbrand wrote: >Page isolation doesn't actually touch the pages, it simply isolates >pageblocks and moves all free pages to the MIGRATE_ISOLATE freelist. > >We already place pages to the tail of the freelists when undoing >isolation via __putback_isolated_page(), let's do it in any case >(e.g., if order <= pageblock_order) and document the behavior. > >Add a "to_tail" parameter to move_freepages_block() but introduce a >a new move_to_free_list_tail() - similar to add_to_free_list_tail(). > >This change results in all pages getting onlined via online_pages() to >be placed to the tail of the freelist. > >Reviewed-by: Oscar Salvador <osalvador@suse.de> >Cc: Andrew Morton <akpm@linux-foundation.org> >Cc: Alexander Duyck <alexander.h.duyck@linux.intel.com> >Cc: Mel Gorman <mgorman@techsingularity.net> >Cc: Michal Hocko <mhocko@kernel.org> >Cc: Dave Hansen <dave.hansen@intel.com> >Cc: Vlastimil Babka <vbabka@suse.cz> >Cc: Wei Yang <richard.weiyang@linux.alibaba.com> >Cc: Oscar Salvador <osalvador@suse.de> >Cc: Mike Rapoport <rppt@kernel.org> >Cc: Scott Cheloha <cheloha@linux.ibm.com> >Cc: Michael Ellerman <mpe@ellerman.id.au> >Signed-off-by: David Hildenbrand <david@redhat.com> >--- > include/linux/page-isolation.h | 4 ++-- > mm/page_alloc.c | 35 +++++++++++++++++++++++----------- > mm/page_isolation.c | 12 +++++++++--- > 3 files changed, 35 insertions(+), 16 deletions(-) > >diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h >index 572458016331..3eca9b3c5305 100644 >--- a/include/linux/page-isolation.h >+++ b/include/linux/page-isolation.h >@@ -36,8 +36,8 @@ static inline bool is_migrate_isolate(int migratetype) > struct page *has_unmovable_pages(struct zone *zone, struct page *page, > int migratetype, int flags); > void set_pageblock_migratetype(struct page *page, int migratetype); >-int move_freepages_block(struct zone *zone, struct page *page, >- int migratetype, int *num_movable); >+int move_freepages_block(struct zone *zone, struct page *page, int migratetype, >+ bool to_tail, int *num_movable); > > /* > * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE. >diff --git a/mm/page_alloc.c b/mm/page_alloc.c >index 9e3ed4a6f69a..d5a5f528b8ca 100644 >--- a/mm/page_alloc.c >+++ b/mm/page_alloc.c >@@ -905,6 +905,15 @@ static inline void move_to_free_list(struct page *page, struct zone *zone, > list_move(&page->lru, &area->free_list[migratetype]); > } > >+/* Used for pages which are on another list */ >+static inline void move_to_free_list_tail(struct page *page, struct zone *zone, >+ unsigned int order, int migratetype) >+{ >+ struct free_area *area = &zone->free_area[order]; >+ >+ list_move_tail(&page->lru, &area->free_list[migratetype]); >+} >+ Would it be better to pass the *to_tail* to move_to_free_list(), so we won't have a new function? > static inline void del_page_from_free_list(struct page *page, struct zone *zone, > unsigned int order) > { >@@ -2338,9 +2347,9 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone, > * Note that start_page and end_pages are not aligned on a pageblock > * boundary. 
If alignment is required, use move_freepages_block() > */ >-static int move_freepages(struct zone *zone, >- struct page *start_page, struct page *end_page, >- int migratetype, int *num_movable) >+static int move_freepages(struct zone *zone, struct page *start_page, >+ struct page *end_page, int migratetype, >+ bool to_tail, int *num_movable) > { > struct page *page; > unsigned int order; >@@ -2371,7 +2380,10 @@ static int move_freepages(struct zone *zone, > VM_BUG_ON_PAGE(page_zone(page) != zone, page); > > order = page_order(page); >- move_to_free_list(page, zone, order, migratetype); >+ if (to_tail) >+ move_to_free_list_tail(page, zone, order, migratetype); >+ else >+ move_to_free_list(page, zone, order, migratetype); And here, we just need to pass the *to_tail* to move_to_free_list(). > page += 1 << order; > pages_moved += 1 << order; > } >@@ -2379,8 +2391,8 @@ static int move_freepages(struct zone *zone, > return pages_moved; > } > >-int move_freepages_block(struct zone *zone, struct page *page, >- int migratetype, int *num_movable) >+int move_freepages_block(struct zone *zone, struct page *page, int migratetype, >+ bool to_tail, int *num_movable) > { > unsigned long start_pfn, end_pfn; > struct page *start_page, *end_page; >@@ -2401,7 +2413,7 @@ int move_freepages_block(struct zone *zone, struct page *page, > return 0; > > return move_freepages(zone, start_page, end_page, migratetype, >- num_movable); >+ to_tail, num_movable); > } > > static void change_pageblock_range(struct page *pageblock_page, >@@ -2526,8 +2538,8 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page, > if (!whole_block) > goto single_page; > >- free_pages = move_freepages_block(zone, page, start_type, >- &movable_pages); >+ free_pages = move_freepages_block(zone, page, start_type, false, >+ &movable_pages); > /* > * Determine how many pages are compatible with our allocation. > * For movable allocation, it's the number of movable pages which >@@ -2635,7 +2647,8 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, > && !is_migrate_cma(mt)) { > zone->nr_reserved_highatomic += pageblock_nr_pages; > set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); >- move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); >+ move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, false, >+ NULL); > } > > out_unlock: >@@ -2711,7 +2724,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, > */ > set_pageblock_migratetype(page, ac->migratetype); > ret = move_freepages_block(zone, page, ac->migratetype, >- NULL); >+ false, NULL); > if (ret) { > spin_unlock_irqrestore(&zone->lock, flags); > return ret; >diff --git a/mm/page_isolation.c b/mm/page_isolation.c >index abfe26ad59fd..de44e1329706 100644 >--- a/mm/page_isolation.c >+++ b/mm/page_isolation.c >@@ -45,7 +45,7 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_ > set_pageblock_migratetype(page, MIGRATE_ISOLATE); > zone->nr_isolate_pageblock++; > nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE, >- NULL); >+ false, NULL); > > __mod_zone_freepage_state(zone, -nr_pages, mt); > spin_unlock_irqrestore(&zone->lock, flags); >@@ -83,7 +83,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype) > * Because freepage with more than pageblock_order on isolated > * pageblock is restricted to merge due to freepage counting problem, > * it is possible that there is free buddy page. 
>- * move_freepages_block() doesn't care of merge so we need other >+ * move_freepages_block() don't care about merging, so we need another > * approach in order to merge them. Isolation and free will make > * these pages to be merged. > */ >@@ -106,9 +106,15 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype) > * If we isolate freepage with more than pageblock_order, there > * should be no freepage in the range, so we could avoid costly > * pageblock scanning for freepage moving. >+ * >+ * We didn't actually touch any of the isolated pages, so place them >+ * to the tail of the freelist. This is an optimization for memory >+ * onlining - just onlined memory won't immediately be considered for >+ * allocation. > */ > if (!isolated_page) { >- nr_pages = move_freepages_block(zone, page, migratetype, NULL); >+ nr_pages = move_freepages_block(zone, page, migratetype, true, >+ NULL); > __mod_zone_freepage_state(zone, nr_pages, migratetype); > } > set_pageblock_migratetype(page, migratetype); >-- >2.26.2 Others looks good to me.
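For readers skimming the thread, the alternative Wei Yang suggests here would fold the tail behaviour into the existing helper instead of introducing a second one. A sketch of that suggestion (not a posted patch) against the code quoted above:

```c
/* Used for pages which are on another list */
static inline void move_to_free_list(struct page *page, struct zone *zone,
				     unsigned int order, int migratetype,
				     bool to_tail)
{
	struct free_area *area = &zone->free_area[order];

	if (to_tail)
		list_move_tail(&page->lru, &area->free_list[migratetype]);
	else
		list_move(&page->lru, &area->free_list[migratetype]);
}
```

move_freepages() would then simply forward its to_tail argument instead of branching between two helpers.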
On 29.09.20 11:18, Wei Yang wrote: > On Mon, Sep 28, 2020 at 08:21:08PM +0200, David Hildenbrand wrote: >> Page isolation doesn't actually touch the pages, it simply isolates >> pageblocks and moves all free pages to the MIGRATE_ISOLATE freelist. >> >> We already place pages to the tail of the freelists when undoing >> isolation via __putback_isolated_page(), let's do it in any case >> (e.g., if order <= pageblock_order) and document the behavior. >> >> Add a "to_tail" parameter to move_freepages_block() but introduce a >> a new move_to_free_list_tail() - similar to add_to_free_list_tail(). s/a a/a/ >> >> This change results in all pages getting onlined via online_pages() to >> be placed to the tail of the freelist. >> >> Reviewed-by: Oscar Salvador <osalvador@suse.de> >> Cc: Andrew Morton <akpm@linux-foundation.org> >> Cc: Alexander Duyck <alexander.h.duyck@linux.intel.com> >> Cc: Mel Gorman <mgorman@techsingularity.net> >> Cc: Michal Hocko <mhocko@kernel.org> >> Cc: Dave Hansen <dave.hansen@intel.com> >> Cc: Vlastimil Babka <vbabka@suse.cz> >> Cc: Wei Yang <richard.weiyang@linux.alibaba.com> >> Cc: Oscar Salvador <osalvador@suse.de> >> Cc: Mike Rapoport <rppt@kernel.org> >> Cc: Scott Cheloha <cheloha@linux.ibm.com> >> Cc: Michael Ellerman <mpe@ellerman.id.au> >> Signed-off-by: David Hildenbrand <david@redhat.com> >> --- >> include/linux/page-isolation.h | 4 ++-- >> mm/page_alloc.c | 35 +++++++++++++++++++++++----------- >> mm/page_isolation.c | 12 +++++++++--- >> 3 files changed, 35 insertions(+), 16 deletions(-) >> >> diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h >> index 572458016331..3eca9b3c5305 100644 >> --- a/include/linux/page-isolation.h >> +++ b/include/linux/page-isolation.h >> @@ -36,8 +36,8 @@ static inline bool is_migrate_isolate(int migratetype) >> struct page *has_unmovable_pages(struct zone *zone, struct page *page, >> int migratetype, int flags); >> void set_pageblock_migratetype(struct page *page, int migratetype); >> -int move_freepages_block(struct zone *zone, struct page *page, >> - int migratetype, int *num_movable); >> +int move_freepages_block(struct zone *zone, struct page *page, int migratetype, >> + bool to_tail, int *num_movable); >> >> /* >> * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE. >> diff --git a/mm/page_alloc.c b/mm/page_alloc.c >> index 9e3ed4a6f69a..d5a5f528b8ca 100644 >> --- a/mm/page_alloc.c >> +++ b/mm/page_alloc.c >> @@ -905,6 +905,15 @@ static inline void move_to_free_list(struct page *page, struct zone *zone, >> list_move(&page->lru, &area->free_list[migratetype]); >> } >> >> +/* Used for pages which are on another list */ >> +static inline void move_to_free_list_tail(struct page *page, struct zone *zone, >> + unsigned int order, int migratetype) >> +{ >> + struct free_area *area = &zone->free_area[order]; >> + >> + list_move_tail(&page->lru, &area->free_list[migratetype]); >> +} >> + > > Would it be better to pass the *to_tail* to move_to_free_list(), so we won't > have a new function? Hi, thanks for the review! See discussion in RFC + cover letter: "Add a "to_tail" parameter to move_freepages_block() but introduce a new move_to_free_list_tail() - similar to add_to_free_list_tail()."
On Tue, Sep 29, 2020 at 12:12:14PM +0200, David Hildenbrand wrote: >On 29.09.20 11:18, Wei Yang wrote: >> On Mon, Sep 28, 2020 at 08:21:08PM +0200, David Hildenbrand wrote: >>> Page isolation doesn't actually touch the pages, it simply isolates >>> pageblocks and moves all free pages to the MIGRATE_ISOLATE freelist. >>> >>> We already place pages to the tail of the freelists when undoing >>> isolation via __putback_isolated_page(), let's do it in any case >>> (e.g., if order <= pageblock_order) and document the behavior. >>> >>> Add a "to_tail" parameter to move_freepages_block() but introduce a >>> a new move_to_free_list_tail() - similar to add_to_free_list_tail(). > >s/a a/a/ > >>> >>> This change results in all pages getting onlined via online_pages() to >>> be placed to the tail of the freelist. >>> >>> Reviewed-by: Oscar Salvador <osalvador@suse.de> >>> Cc: Andrew Morton <akpm@linux-foundation.org> >>> Cc: Alexander Duyck <alexander.h.duyck@linux.intel.com> >>> Cc: Mel Gorman <mgorman@techsingularity.net> >>> Cc: Michal Hocko <mhocko@kernel.org> >>> Cc: Dave Hansen <dave.hansen@intel.com> >>> Cc: Vlastimil Babka <vbabka@suse.cz> >>> Cc: Wei Yang <richard.weiyang@linux.alibaba.com> >>> Cc: Oscar Salvador <osalvador@suse.de> >>> Cc: Mike Rapoport <rppt@kernel.org> >>> Cc: Scott Cheloha <cheloha@linux.ibm.com> >>> Cc: Michael Ellerman <mpe@ellerman.id.au> >>> Signed-off-by: David Hildenbrand <david@redhat.com> >>> --- >>> include/linux/page-isolation.h | 4 ++-- >>> mm/page_alloc.c | 35 +++++++++++++++++++++++----------- >>> mm/page_isolation.c | 12 +++++++++--- >>> 3 files changed, 35 insertions(+), 16 deletions(-) >>> >>> diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h >>> index 572458016331..3eca9b3c5305 100644 >>> --- a/include/linux/page-isolation.h >>> +++ b/include/linux/page-isolation.h >>> @@ -36,8 +36,8 @@ static inline bool is_migrate_isolate(int migratetype) >>> struct page *has_unmovable_pages(struct zone *zone, struct page *page, >>> int migratetype, int flags); >>> void set_pageblock_migratetype(struct page *page, int migratetype); >>> -int move_freepages_block(struct zone *zone, struct page *page, >>> - int migratetype, int *num_movable); >>> +int move_freepages_block(struct zone *zone, struct page *page, int migratetype, >>> + bool to_tail, int *num_movable); >>> >>> /* >>> * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE. >>> diff --git a/mm/page_alloc.c b/mm/page_alloc.c >>> index 9e3ed4a6f69a..d5a5f528b8ca 100644 >>> --- a/mm/page_alloc.c >>> +++ b/mm/page_alloc.c >>> @@ -905,6 +905,15 @@ static inline void move_to_free_list(struct page *page, struct zone *zone, >>> list_move(&page->lru, &area->free_list[migratetype]); >>> } >>> >>> +/* Used for pages which are on another list */ >>> +static inline void move_to_free_list_tail(struct page *page, struct zone *zone, >>> + unsigned int order, int migratetype) >>> +{ >>> + struct free_area *area = &zone->free_area[order]; >>> + >>> + list_move_tail(&page->lru, &area->free_list[migratetype]); >>> +} >>> + >> >> Would it be better to pass the *to_tail* to move_to_free_list(), so we won't >> have a new function? > >Hi, > >thanks for the review! > >See discussion in RFC + cover letter: > >"Add a "to_tail" parameter to move_freepages_block() but introduce a new >move_to_free_list_tail() - similar to add_to_free_list_tail()." Hmm, sounds reasonable. Reviewed-by: Wei Yang <richard.weiyang@linux.alibaba.com>
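The rationale David points back to ("similar to add_to_free_list_tail()") is easier to follow next to the existing pair of helpers in mm/page_alloc.c, which at the time looked roughly as follows; the patch mirrors this pattern of separate head/tail variants rather than adding a flag:

```c
/* Used for pages not on another list */
static inline void add_to_free_list(struct page *page, struct zone *zone,
				    unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add(&page->lru, &area->free_list[migratetype]);
	area->nr_free++;
}

/* Used for pages not on another list */
static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
					 unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add_tail(&page->lru, &area->free_list[migratetype]);
	area->nr_free++;
}
```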
On Mon 28-09-20 20:21:08, David Hildenbrand wrote: > Page isolation doesn't actually touch the pages, it simply isolates > pageblocks and moves all free pages to the MIGRATE_ISOLATE freelist. > > We already place pages to the tail of the freelists when undoing > isolation via __putback_isolated_page(), let's do it in any case > (e.g., if order <= pageblock_order) and document the behavior. > > Add a "to_tail" parameter to move_freepages_block() but introduce a > a new move_to_free_list_tail() - similar to add_to_free_list_tail(). > > This change results in all pages getting onlined via online_pages() to > be placed to the tail of the freelist. Is there anything preventing to do this unconditionally? Or in other words is any of the existing callers of move_freepages_block benefiting from adding to the head? > Reviewed-by: Oscar Salvador <osalvador@suse.de> > Cc: Andrew Morton <akpm@linux-foundation.org> > Cc: Alexander Duyck <alexander.h.duyck@linux.intel.com> > Cc: Mel Gorman <mgorman@techsingularity.net> > Cc: Michal Hocko <mhocko@kernel.org> > Cc: Dave Hansen <dave.hansen@intel.com> > Cc: Vlastimil Babka <vbabka@suse.cz> > Cc: Wei Yang <richard.weiyang@linux.alibaba.com> > Cc: Oscar Salvador <osalvador@suse.de> > Cc: Mike Rapoport <rppt@kernel.org> > Cc: Scott Cheloha <cheloha@linux.ibm.com> > Cc: Michael Ellerman <mpe@ellerman.id.au> > Signed-off-by: David Hildenbrand <david@redhat.com> > --- > include/linux/page-isolation.h | 4 ++-- > mm/page_alloc.c | 35 +++++++++++++++++++++++----------- > mm/page_isolation.c | 12 +++++++++--- > 3 files changed, 35 insertions(+), 16 deletions(-) > > diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h > index 572458016331..3eca9b3c5305 100644 > --- a/include/linux/page-isolation.h > +++ b/include/linux/page-isolation.h > @@ -36,8 +36,8 @@ static inline bool is_migrate_isolate(int migratetype) > struct page *has_unmovable_pages(struct zone *zone, struct page *page, > int migratetype, int flags); > void set_pageblock_migratetype(struct page *page, int migratetype); > -int move_freepages_block(struct zone *zone, struct page *page, > - int migratetype, int *num_movable); > +int move_freepages_block(struct zone *zone, struct page *page, int migratetype, > + bool to_tail, int *num_movable); > > /* > * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE. > diff --git a/mm/page_alloc.c b/mm/page_alloc.c > index 9e3ed4a6f69a..d5a5f528b8ca 100644 > --- a/mm/page_alloc.c > +++ b/mm/page_alloc.c > @@ -905,6 +905,15 @@ static inline void move_to_free_list(struct page *page, struct zone *zone, > list_move(&page->lru, &area->free_list[migratetype]); > } > > +/* Used for pages which are on another list */ > +static inline void move_to_free_list_tail(struct page *page, struct zone *zone, > + unsigned int order, int migratetype) > +{ > + struct free_area *area = &zone->free_area[order]; > + > + list_move_tail(&page->lru, &area->free_list[migratetype]); > +} > + > static inline void del_page_from_free_list(struct page *page, struct zone *zone, > unsigned int order) > { > @@ -2338,9 +2347,9 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone, > * Note that start_page and end_pages are not aligned on a pageblock > * boundary. 
If alignment is required, use move_freepages_block() > */ > -static int move_freepages(struct zone *zone, > - struct page *start_page, struct page *end_page, > - int migratetype, int *num_movable) > +static int move_freepages(struct zone *zone, struct page *start_page, > + struct page *end_page, int migratetype, > + bool to_tail, int *num_movable) > { > struct page *page; > unsigned int order; > @@ -2371,7 +2380,10 @@ static int move_freepages(struct zone *zone, > VM_BUG_ON_PAGE(page_zone(page) != zone, page); > > order = page_order(page); > - move_to_free_list(page, zone, order, migratetype); > + if (to_tail) > + move_to_free_list_tail(page, zone, order, migratetype); > + else > + move_to_free_list(page, zone, order, migratetype); > page += 1 << order; > pages_moved += 1 << order; > } > @@ -2379,8 +2391,8 @@ static int move_freepages(struct zone *zone, > return pages_moved; > } > > -int move_freepages_block(struct zone *zone, struct page *page, > - int migratetype, int *num_movable) > +int move_freepages_block(struct zone *zone, struct page *page, int migratetype, > + bool to_tail, int *num_movable) > { > unsigned long start_pfn, end_pfn; > struct page *start_page, *end_page; > @@ -2401,7 +2413,7 @@ int move_freepages_block(struct zone *zone, struct page *page, > return 0; > > return move_freepages(zone, start_page, end_page, migratetype, > - num_movable); > + to_tail, num_movable); > } > > static void change_pageblock_range(struct page *pageblock_page, > @@ -2526,8 +2538,8 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page, > if (!whole_block) > goto single_page; > > - free_pages = move_freepages_block(zone, page, start_type, > - &movable_pages); > + free_pages = move_freepages_block(zone, page, start_type, false, > + &movable_pages); > /* > * Determine how many pages are compatible with our allocation. > * For movable allocation, it's the number of movable pages which > @@ -2635,7 +2647,8 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, > && !is_migrate_cma(mt)) { > zone->nr_reserved_highatomic += pageblock_nr_pages; > set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); > - move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); > + move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, false, > + NULL); > } > > out_unlock: > @@ -2711,7 +2724,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, > */ > set_pageblock_migratetype(page, ac->migratetype); > ret = move_freepages_block(zone, page, ac->migratetype, > - NULL); > + false, NULL); > if (ret) { > spin_unlock_irqrestore(&zone->lock, flags); > return ret; > diff --git a/mm/page_isolation.c b/mm/page_isolation.c > index abfe26ad59fd..de44e1329706 100644 > --- a/mm/page_isolation.c > +++ b/mm/page_isolation.c > @@ -45,7 +45,7 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_ > set_pageblock_migratetype(page, MIGRATE_ISOLATE); > zone->nr_isolate_pageblock++; > nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE, > - NULL); > + false, NULL); > > __mod_zone_freepage_state(zone, -nr_pages, mt); > spin_unlock_irqrestore(&zone->lock, flags); > @@ -83,7 +83,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype) > * Because freepage with more than pageblock_order on isolated > * pageblock is restricted to merge due to freepage counting problem, > * it is possible that there is free buddy page. 
> - * move_freepages_block() doesn't care of merge so we need other > + * move_freepages_block() don't care about merging, so we need another > * approach in order to merge them. Isolation and free will make > * these pages to be merged. > */ > @@ -106,9 +106,15 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype) > * If we isolate freepage with more than pageblock_order, there > * should be no freepage in the range, so we could avoid costly > * pageblock scanning for freepage moving. > + * > + * We didn't actually touch any of the isolated pages, so place them > + * to the tail of the freelist. This is an optimization for memory > + * onlining - just onlined memory won't immediately be considered for > + * allocation. > */ > if (!isolated_page) { > - nr_pages = move_freepages_block(zone, page, migratetype, NULL); > + nr_pages = move_freepages_block(zone, page, migratetype, true, > + NULL); > __mod_zone_freepage_state(zone, nr_pages, migratetype); > } > set_pageblock_migratetype(page, migratetype); > -- > 2.26.2
On 02.10.20 15:24, Michal Hocko wrote: > On Mon 28-09-20 20:21:08, David Hildenbrand wrote: >> Page isolation doesn't actually touch the pages, it simply isolates >> pageblocks and moves all free pages to the MIGRATE_ISOLATE freelist. >> >> We already place pages to the tail of the freelists when undoing >> isolation via __putback_isolated_page(), let's do it in any case >> (e.g., if order <= pageblock_order) and document the behavior. >> >> Add a "to_tail" parameter to move_freepages_block() but introduce a >> a new move_to_free_list_tail() - similar to add_to_free_list_tail(). >> >> This change results in all pages getting onlined via online_pages() to >> be placed to the tail of the freelist. > > Is there anything preventing to do this unconditionally? Or in other > words is any of the existing callers of move_freepages_block benefiting > from adding to the head? 1. mm/page_isolation.c:set_migratetype_isolate() We move stuff to the MIGRATE_ISOLATE list, we don't care about the order there. 2. steal_suitable_fallback(): I don't think we care too much about the order when already stealing pageblocks ... and the freelist is empty I guess? 3. reserve_highatomic_pageblock()/unreserve_highatomic_pageblock() Not sure if we really care. Good question, I tried to be careful of what I touch. Thoughts?
On Fri 02-10-20 17:20:09, David Hildenbrand wrote: > On 02.10.20 15:24, Michal Hocko wrote: > > On Mon 28-09-20 20:21:08, David Hildenbrand wrote: > >> Page isolation doesn't actually touch the pages, it simply isolates > >> pageblocks and moves all free pages to the MIGRATE_ISOLATE freelist. > >> > >> We already place pages to the tail of the freelists when undoing > >> isolation via __putback_isolated_page(), let's do it in any case > >> (e.g., if order <= pageblock_order) and document the behavior. > >> > >> Add a "to_tail" parameter to move_freepages_block() but introduce a > >> a new move_to_free_list_tail() - similar to add_to_free_list_tail(). > >> > >> This change results in all pages getting onlined via online_pages() to > >> be placed to the tail of the freelist. > > > > Is there anything preventing to do this unconditionally? Or in other > > words is any of the existing callers of move_freepages_block benefiting > > from adding to the head? > > 1. mm/page_isolation.c:set_migratetype_isolate() > > We move stuff to the MIGRATE_ISOLATE list, we don't care about the order > there. > > 2. steal_suitable_fallback(): > > I don't think we care too much about the order when already stealing > pageblocks ... and the freelist is empty I guess? > > 3. reserve_highatomic_pageblock()/unreserve_highatomic_pageblock() > > Not sure if we really care. Honestly, I have no idea. I can imagine that some atomic high order workloads (e.g. in net) might benefit from cache line hot pages but I am not sure this is really observable. If yes it would likely be better to have this documented than relying on wild guess. If we do not have any evidence then I would vote for simplicity first and go with unconditional add_to_tail which would simply your patch a bit. Maybe Vlastimil or Mel would have a better picture.
On Mon, Oct 05, 2020 at 08:56:48AM +0200, Michal Hocko wrote: > On Fri 02-10-20 17:20:09, David Hildenbrand wrote: > > On 02.10.20 15:24, Michal Hocko wrote: > > > On Mon 28-09-20 20:21:08, David Hildenbrand wrote: > > >> Page isolation doesn't actually touch the pages, it simply isolates > > >> pageblocks and moves all free pages to the MIGRATE_ISOLATE freelist. > > >> > > >> We already place pages to the tail of the freelists when undoing > > >> isolation via __putback_isolated_page(), let's do it in any case > > >> (e.g., if order <= pageblock_order) and document the behavior. > > >> > > >> Add a "to_tail" parameter to move_freepages_block() but introduce a > > >> a new move_to_free_list_tail() - similar to add_to_free_list_tail(). > > >> > > >> This change results in all pages getting onlined via online_pages() to > > >> be placed to the tail of the freelist. > > > > > > Is there anything preventing to do this unconditionally? Or in other > > > words is any of the existing callers of move_freepages_block benefiting > > > from adding to the head? > > > > 1. mm/page_isolation.c:set_migratetype_isolate() > > > > We move stuff to the MIGRATE_ISOLATE list, we don't care about the order > > there. > > > > 2. steal_suitable_fallback(): > > > > I don't think we care too much about the order when already stealing > > pageblocks ... and the freelist is empty I guess? > > > > 3. reserve_highatomic_pageblock()/unreserve_highatomic_pageblock() > > > > Not sure if we really care. > > Honestly, I have no idea. I can imagine that some atomic high order > workloads (e.g. in net) might benefit from cache line hot pages but I am > not sure this is really observable. The highatomic reserve is more concerned that about the allocation succeeding than it is about cache hotness.
On 05.10.20 10:20, Mel Gorman wrote: > On Mon, Oct 05, 2020 at 08:56:48AM +0200, Michal Hocko wrote: >> On Fri 02-10-20 17:20:09, David Hildenbrand wrote: >>> On 02.10.20 15:24, Michal Hocko wrote: >>>> On Mon 28-09-20 20:21:08, David Hildenbrand wrote: >>>>> Page isolation doesn't actually touch the pages, it simply isolates >>>>> pageblocks and moves all free pages to the MIGRATE_ISOLATE freelist. >>>>> >>>>> We already place pages to the tail of the freelists when undoing >>>>> isolation via __putback_isolated_page(), let's do it in any case >>>>> (e.g., if order <= pageblock_order) and document the behavior. >>>>> >>>>> Add a "to_tail" parameter to move_freepages_block() but introduce a >>>>> a new move_to_free_list_tail() - similar to add_to_free_list_tail(). >>>>> >>>>> This change results in all pages getting onlined via online_pages() to >>>>> be placed to the tail of the freelist. >>>> >>>> Is there anything preventing to do this unconditionally? Or in other >>>> words is any of the existing callers of move_freepages_block benefiting >>>> from adding to the head? >>> >>> 1. mm/page_isolation.c:set_migratetype_isolate() >>> >>> We move stuff to the MIGRATE_ISOLATE list, we don't care about the order >>> there. >>> >>> 2. steal_suitable_fallback(): >>> >>> I don't think we care too much about the order when already stealing >>> pageblocks ... and the freelist is empty I guess? >>> >>> 3. reserve_highatomic_pageblock()/unreserve_highatomic_pageblock() >>> >>> Not sure if we really care. >> >> Honestly, I have no idea. I can imagine that some atomic high order >> workloads (e.g. in net) might benefit from cache line hot pages but I am >> not sure this is really observable. > > The highatomic reserve is more concerned that about the allocation > succeeding than it is about cache hotness. > Thanks Mel and Michal. I'll simplify this patch then - and if it turns out to be an actual problem, we can change that one instance, adding a proper comment. Thanks!
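Given the outcome here (tail placement done unconditionally, with the one potentially cache-hotness-sensitive call site to be revisited only if it ever shows up as a problem), the simplified patch would drop the to_tail plumbing and change the existing helper directly. A rough sketch of what David announces above, not the actual follow-up posting:

```c
/* Used for pages which are on another list */
static inline void move_to_free_list(struct page *page, struct zone *zone,
				     unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	/*
	 * Pages moved between freelists were not touched in between, so
	 * always queue them at the tail. If a caller (e.g., the highatomic
	 * reserve) ever turns out to care about cache hotness, that single
	 * call site can be special-cased with a proper comment.
	 */
	list_move_tail(&page->lru, &area->free_list[migratetype]);
}
```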
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index 572458016331..3eca9b3c5305 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h @@ -36,8 +36,8 @@ static inline bool is_migrate_isolate(int migratetype) struct page *has_unmovable_pages(struct zone *zone, struct page *page, int migratetype, int flags); void set_pageblock_migratetype(struct page *page, int migratetype); -int move_freepages_block(struct zone *zone, struct page *page, - int migratetype, int *num_movable); +int move_freepages_block(struct zone *zone, struct page *page, int migratetype, + bool to_tail, int *num_movable); /* * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE. diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 9e3ed4a6f69a..d5a5f528b8ca 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -905,6 +905,15 @@ static inline void move_to_free_list(struct page *page, struct zone *zone, list_move(&page->lru, &area->free_list[migratetype]); } +/* Used for pages which are on another list */ +static inline void move_to_free_list_tail(struct page *page, struct zone *zone, + unsigned int order, int migratetype) +{ + struct free_area *area = &zone->free_area[order]; + + list_move_tail(&page->lru, &area->free_list[migratetype]); +} + static inline void del_page_from_free_list(struct page *page, struct zone *zone, unsigned int order) { @@ -2338,9 +2347,9 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone, * Note that start_page and end_pages are not aligned on a pageblock * boundary. If alignment is required, use move_freepages_block() */ -static int move_freepages(struct zone *zone, - struct page *start_page, struct page *end_page, - int migratetype, int *num_movable) +static int move_freepages(struct zone *zone, struct page *start_page, + struct page *end_page, int migratetype, + bool to_tail, int *num_movable) { struct page *page; unsigned int order; @@ -2371,7 +2380,10 @@ static int move_freepages(struct zone *zone, VM_BUG_ON_PAGE(page_zone(page) != zone, page); order = page_order(page); - move_to_free_list(page, zone, order, migratetype); + if (to_tail) + move_to_free_list_tail(page, zone, order, migratetype); + else + move_to_free_list(page, zone, order, migratetype); page += 1 << order; pages_moved += 1 << order; } @@ -2379,8 +2391,8 @@ static int move_freepages(struct zone *zone, return pages_moved; } -int move_freepages_block(struct zone *zone, struct page *page, - int migratetype, int *num_movable) +int move_freepages_block(struct zone *zone, struct page *page, int migratetype, + bool to_tail, int *num_movable) { unsigned long start_pfn, end_pfn; struct page *start_page, *end_page; @@ -2401,7 +2413,7 @@ int move_freepages_block(struct zone *zone, struct page *page, return 0; return move_freepages(zone, start_page, end_page, migratetype, - num_movable); + to_tail, num_movable); } static void change_pageblock_range(struct page *pageblock_page, @@ -2526,8 +2538,8 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page, if (!whole_block) goto single_page; - free_pages = move_freepages_block(zone, page, start_type, - &movable_pages); + free_pages = move_freepages_block(zone, page, start_type, false, + &movable_pages); /* * Determine how many pages are compatible with our allocation. 
* For movable allocation, it's the number of movable pages which @@ -2635,7 +2647,8 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, && !is_migrate_cma(mt)) { zone->nr_reserved_highatomic += pageblock_nr_pages; set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); - move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); + move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, false, + NULL); } out_unlock: @@ -2711,7 +2724,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, */ set_pageblock_migratetype(page, ac->migratetype); ret = move_freepages_block(zone, page, ac->migratetype, - NULL); + false, NULL); if (ret) { spin_unlock_irqrestore(&zone->lock, flags); return ret; diff --git a/mm/page_isolation.c b/mm/page_isolation.c index abfe26ad59fd..de44e1329706 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -45,7 +45,7 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_ set_pageblock_migratetype(page, MIGRATE_ISOLATE); zone->nr_isolate_pageblock++; nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE, - NULL); + false, NULL); __mod_zone_freepage_state(zone, -nr_pages, mt); spin_unlock_irqrestore(&zone->lock, flags); @@ -83,7 +83,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype) * Because freepage with more than pageblock_order on isolated * pageblock is restricted to merge due to freepage counting problem, * it is possible that there is free buddy page. - * move_freepages_block() doesn't care of merge so we need other + * move_freepages_block() don't care about merging, so we need another * approach in order to merge them. Isolation and free will make * these pages to be merged. */ @@ -106,9 +106,15 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype) * If we isolate freepage with more than pageblock_order, there * should be no freepage in the range, so we could avoid costly * pageblock scanning for freepage moving. + * + * We didn't actually touch any of the isolated pages, so place them + * to the tail of the freelist. This is an optimization for memory + * onlining - just onlined memory won't immediately be considered for + * allocation. */ if (!isolated_page) { - nr_pages = move_freepages_block(zone, page, migratetype, NULL); + nr_pages = move_freepages_block(zone, page, migratetype, true, + NULL); __mod_zone_freepage_state(zone, nr_pages, migratetype); } set_pageblock_migratetype(page, migratetype);