[v2,03/10] mm: don't pass "enum lru_list" to lru list addition functions

Message ID 20210122220600.906146-4-yuzhao@google.com (mailing list archive)
State New, archived
Series mm: lru related cleanups

Commit Message

Yu Zhao Jan. 22, 2021, 10:05 p.m. UTC
The "enum lru_list" parameter to add_page_to_lru_list() and
add_page_to_lru_list_tail() is redundant in the sense that it can
be extracted from the "struct page" parameter by page_lru().

A caveat is that we need to make sure PageActive() or
PageUnevictable() is correctly set or cleared before calling
these two functions. And they are indeed.

Link: https://lore.kernel.org/linux-mm/20201207220949.830352-4-yuzhao@google.com/
Signed-off-by: Yu Zhao <yuzhao@google.com>
---
 include/linux/mm_inline.h |  8 ++++++--
 mm/swap.c                 | 15 +++++++--------
 mm/vmscan.c               |  6 ++----
 3 files changed, 15 insertions(+), 14 deletions(-)
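For reference, the page_lru() helper that both functions now call looks like
this once the series is applied (reproduced from the pre-change side of Yu's
follow-up diff later in this thread):

static __always_inline enum lru_list page_lru(struct page *page)
{
	enum lru_list lru;

	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);

	if (PageUnevictable(page))
		return LRU_UNEVICTABLE;

	lru = page_is_file_lru(page) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
	if (PageActive(page))
		lru += LRU_ACTIVE;

	return lru;
}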

Comments

Vlastimil Babka Jan. 26, 2021, 7:13 p.m. UTC | #1
On 1/22/21 11:05 PM, Yu Zhao wrote:
> The "enum lru_list" parameter to add_page_to_lru_list() and
> add_page_to_lru_list_tail() is redundant in the sense that it can
> be extracted from the "struct page" parameter by page_lru().

Okay, however, it means repeated extraction of a value that we already knew. The
result of compilation is rather sad. This is bloat-o-meter on mm/built-in.a
(without CONFIG_DEBUG_VM, btw) between patch 2 and 5:

add/remove: 0/0 grow/shrink: 10/5 up/down: 1837/-60 (1777)
Function                                     old     new   delta
lru_deactivate_file_fn                       932    1368    +436
lru_lazyfree_fn.part                         629     953    +324
check_move_unevictable_pages                1171    1424    +253
__activate_page.part                         735     984    +249
lru_deactivate_fn.part                       593     822    +229
perf_trace_mm_lru_insertion                  458     560    +102
trace_event_raw_event_mm_lru_insertion       412     500     +88
__page_cache_release                         479     558     +79
release_pages                               1430    1499     +69
pagevec_move_tail_fn.part                    761     769      +8
isolate_lru_page                             471     470      -1
__bpf_trace_mm_lru_insertion                   7       5      -2
__traceiter_mm_lru_insertion                  55      47      -8
isolate_migratepages_block                  3200    3185     -15
__pagevec_lru_add_fn                        1092    1058     -34


> A caveat is that we need to make sure PageActive() or
> PageUnevictable() is correctly set or cleared before calling
> these two functions. And they are indeed.
> 
> Link: https://lore.kernel.org/linux-mm/20201207220949.830352-4-yuzhao@google.com/
> Signed-off-by: Yu Zhao <yuzhao@google.com>
> ---
>  include/linux/mm_inline.h |  8 ++++++--
>  mm/swap.c                 | 15 +++++++--------
>  mm/vmscan.c               |  6 ++----
>  3 files changed, 15 insertions(+), 14 deletions(-)
> 
> diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
> index 2889741f450a..130ba3201d3f 100644
> --- a/include/linux/mm_inline.h
> +++ b/include/linux/mm_inline.h
> @@ -106,15 +106,19 @@ static __always_inline enum lru_list page_lru(struct page *page)
>  }
>  
>  static __always_inline void add_page_to_lru_list(struct page *page,
> -				struct lruvec *lruvec, enum lru_list lru)
> +				struct lruvec *lruvec)
>  {
> +	enum lru_list lru = page_lru(page);
> +
>  	update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
>  	list_add(&page->lru, &lruvec->lists[lru]);
>  }
>  
>  static __always_inline void add_page_to_lru_list_tail(struct page *page,
> -				struct lruvec *lruvec, enum lru_list lru)
> +				struct lruvec *lruvec)
>  {
> +	enum lru_list lru = page_lru(page);
> +
>  	update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
>  	list_add_tail(&page->lru, &lruvec->lists[lru]);
>  }
> diff --git a/mm/swap.c b/mm/swap.c
> index 490553f3f9ef..4b058ef37add 100644
> --- a/mm/swap.c
> +++ b/mm/swap.c
> @@ -231,7 +231,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
>  	if (!PageUnevictable(page)) {
>  		del_page_from_lru_list(page, lruvec, page_lru(page));
>  		ClearPageActive(page);
> -		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
> +		add_page_to_lru_list_tail(page, lruvec);
>  		__count_vm_events(PGROTATED, thp_nr_pages(page));
>  	}
>  }
> @@ -313,8 +313,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec)
>  
>  		del_page_from_lru_list(page, lruvec, lru);
>  		SetPageActive(page);
> -		lru += LRU_ACTIVE;
> -		add_page_to_lru_list(page, lruvec, lru);
> +		add_page_to_lru_list(page, lruvec);
>  		trace_mm_lru_activate(page);
>  
>  		__count_vm_events(PGACTIVATE, nr_pages);
> @@ -543,14 +542,14 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
>  		 * It can make readahead confusing.  But race window
>  		 * is _really_ small and  it's non-critical problem.
>  		 */
> -		add_page_to_lru_list(page, lruvec, lru);
> +		add_page_to_lru_list(page, lruvec);
>  		SetPageReclaim(page);
>  	} else {
>  		/*
>  		 * The page's writeback ends up during pagevec
>  		 * We moves tha page into tail of inactive.
>  		 */
> -		add_page_to_lru_list_tail(page, lruvec, lru);
> +		add_page_to_lru_list_tail(page, lruvec);
>  		__count_vm_events(PGROTATED, nr_pages);
>  	}
>  
> @@ -570,7 +569,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
>  		del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
>  		ClearPageActive(page);
>  		ClearPageReferenced(page);
> -		add_page_to_lru_list(page, lruvec, lru);
> +		add_page_to_lru_list(page, lruvec);
>  
>  		__count_vm_events(PGDEACTIVATE, nr_pages);
>  		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
> @@ -595,7 +594,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec)
>  		 * anonymous pages
>  		 */
>  		ClearPageSwapBacked(page);
> -		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
> +		add_page_to_lru_list(page, lruvec);
>  
>  		__count_vm_events(PGLAZYFREE, nr_pages);
>  		__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
> @@ -1005,7 +1004,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
>  			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
>  	}
>  
> -	add_page_to_lru_list(page, lruvec, lru);
> +	add_page_to_lru_list(page, lruvec);
>  	trace_mm_lru_insertion(page, lru);
>  }
>  
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 19875660e8f8..09e4f97488c9 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -1867,7 +1867,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
>  		 * inhibits memcg migration).
>  		 */
>  		VM_BUG_ON_PAGE(!lruvec_holds_page_lru_lock(page, lruvec), page);
> -		add_page_to_lru_list(page, lruvec, page_lru(page));
> +		add_page_to_lru_list(page, lruvec);
>  		nr_pages = thp_nr_pages(page);
>  		nr_moved += nr_pages;
>  		if (PageActive(page))
> @@ -4282,12 +4282,10 @@ void check_move_unevictable_pages(struct pagevec *pvec)
>  
>  		lruvec = relock_page_lruvec_irq(page, lruvec);
>  		if (page_evictable(page) && PageUnevictable(page)) {
> -			enum lru_list lru = page_lru_base_type(page);
> -
>  			VM_BUG_ON_PAGE(PageActive(page), page);
>  			ClearPageUnevictable(page);
>  			del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
> -			add_page_to_lru_list(page, lruvec, lru);
> +			add_page_to_lru_list(page, lruvec);
>  			pgrescued += nr_pages;
>  		}
>  		SetPageLRU(page);
>
Yu Zhao Jan. 26, 2021, 9:34 p.m. UTC | #2
On Tue, Jan 26, 2021 at 08:13:11PM +0100, Vlastimil Babka wrote:
> On 1/22/21 11:05 PM, Yu Zhao wrote:
> > The "enum lru_list" parameter to add_page_to_lru_list() and
> > add_page_to_lru_list_tail() is redundant in the sense that it can
> > be extracted from the "struct page" parameter by page_lru().
> 
> Okay, however, it means repeated extraction of a value that we already knew. The
> result of compilation is rather sad. This is bloat-o-meter on mm/built-in.a
> (without CONFIG_DEBUG_VM, btw) between patch 2 and 5:

Thanks for noticing this, Vlastimil. Should I drop the rest of the
series except the first patch?

> add/remove: 0/0 grow/shrink: 10/5 up/down: 1837/-60 (1777)
> Function                                     old     new   delta
> lru_deactivate_file_fn                       932    1368    +436
> lru_lazyfree_fn.part                         629     953    +324
> check_move_unevictable_pages                1171    1424    +253
> __activate_page.part                         735     984    +249
> lru_deactivate_fn.part                       593     822    +229
> perf_trace_mm_lru_insertion                  458     560    +102
> trace_event_raw_event_mm_lru_insertion       412     500     +88
> __page_cache_release                         479     558     +79
> release_pages                               1430    1499     +69
> pagevec_move_tail_fn.part                    761     769      +8
> isolate_lru_page                             471     470      -1
> __bpf_trace_mm_lru_insertion                   7       5      -2
> __traceiter_mm_lru_insertion                  55      47      -8
> isolate_migratepages_block                  3200    3185     -15
> __pagevec_lru_add_fn                        1092    1058     -34
> 
> 
> > A caveat is that we need to make sure PageActive() or
> > PageUnevictable() is correctly set or cleared before calling
> > these two functions. And they are indeed.
> > 
> > Link: https://lore.kernel.org/linux-mm/20201207220949.830352-4-yuzhao@google.com/
> > Signed-off-by: Yu Zhao <yuzhao@google.com>
> > ---
> >  include/linux/mm_inline.h |  8 ++++++--
> >  mm/swap.c                 | 15 +++++++--------
> >  mm/vmscan.c               |  6 ++----
> >  3 files changed, 15 insertions(+), 14 deletions(-)
> > 
> > diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
> > index 2889741f450a..130ba3201d3f 100644
> > --- a/include/linux/mm_inline.h
> > +++ b/include/linux/mm_inline.h
> > @@ -106,15 +106,19 @@ static __always_inline enum lru_list page_lru(struct page *page)
> >  }
> >  
> >  static __always_inline void add_page_to_lru_list(struct page *page,
> > -				struct lruvec *lruvec, enum lru_list lru)
> > +				struct lruvec *lruvec)
> >  {
> > +	enum lru_list lru = page_lru(page);
> > +
> >  	update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
> >  	list_add(&page->lru, &lruvec->lists[lru]);
> >  }
> >  
> >  static __always_inline void add_page_to_lru_list_tail(struct page *page,
> > -				struct lruvec *lruvec, enum lru_list lru)
> > +				struct lruvec *lruvec)
> >  {
> > +	enum lru_list lru = page_lru(page);
> > +
> >  	update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
> >  	list_add_tail(&page->lru, &lruvec->lists[lru]);
> >  }
> > diff --git a/mm/swap.c b/mm/swap.c
> > index 490553f3f9ef..4b058ef37add 100644
> > --- a/mm/swap.c
> > +++ b/mm/swap.c
> > @@ -231,7 +231,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
> >  	if (!PageUnevictable(page)) {
> >  		del_page_from_lru_list(page, lruvec, page_lru(page));
> >  		ClearPageActive(page);
> > -		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
> > +		add_page_to_lru_list_tail(page, lruvec);
> >  		__count_vm_events(PGROTATED, thp_nr_pages(page));
> >  	}
> >  }
> > @@ -313,8 +313,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec)
> >  
> >  		del_page_from_lru_list(page, lruvec, lru);
> >  		SetPageActive(page);
> > -		lru += LRU_ACTIVE;
> > -		add_page_to_lru_list(page, lruvec, lru);
> > +		add_page_to_lru_list(page, lruvec);
> >  		trace_mm_lru_activate(page);
> >  
> >  		__count_vm_events(PGACTIVATE, nr_pages);
> > @@ -543,14 +542,14 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
> >  		 * It can make readahead confusing.  But race window
> >  		 * is _really_ small and  it's non-critical problem.
> >  		 */
> > -		add_page_to_lru_list(page, lruvec, lru);
> > +		add_page_to_lru_list(page, lruvec);
> >  		SetPageReclaim(page);
> >  	} else {
> >  		/*
> >  		 * The page's writeback ends up during pagevec
> >  		 * We moves tha page into tail of inactive.
> >  		 */
> > -		add_page_to_lru_list_tail(page, lruvec, lru);
> > +		add_page_to_lru_list_tail(page, lruvec);
> >  		__count_vm_events(PGROTATED, nr_pages);
> >  	}
> >  
> > @@ -570,7 +569,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
> >  		del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
> >  		ClearPageActive(page);
> >  		ClearPageReferenced(page);
> > -		add_page_to_lru_list(page, lruvec, lru);
> > +		add_page_to_lru_list(page, lruvec);
> >  
> >  		__count_vm_events(PGDEACTIVATE, nr_pages);
> >  		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
> > @@ -595,7 +594,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec)
> >  		 * anonymous pages
> >  		 */
> >  		ClearPageSwapBacked(page);
> > -		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
> > +		add_page_to_lru_list(page, lruvec);
> >  
> >  		__count_vm_events(PGLAZYFREE, nr_pages);
> >  		__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
> > @@ -1005,7 +1004,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
> >  			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
> >  	}
> >  
> > -	add_page_to_lru_list(page, lruvec, lru);
> > +	add_page_to_lru_list(page, lruvec);
> >  	trace_mm_lru_insertion(page, lru);
> >  }
> >  
> > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > index 19875660e8f8..09e4f97488c9 100644
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> > @@ -1867,7 +1867,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
> >  		 * inhibits memcg migration).
> >  		 */
> >  		VM_BUG_ON_PAGE(!lruvec_holds_page_lru_lock(page, lruvec), page);
> > -		add_page_to_lru_list(page, lruvec, page_lru(page));
> > +		add_page_to_lru_list(page, lruvec);
> >  		nr_pages = thp_nr_pages(page);
> >  		nr_moved += nr_pages;
> >  		if (PageActive(page))
> > @@ -4282,12 +4282,10 @@ void check_move_unevictable_pages(struct pagevec *pvec)
> >  
> >  		lruvec = relock_page_lruvec_irq(page, lruvec);
> >  		if (page_evictable(page) && PageUnevictable(page)) {
> > -			enum lru_list lru = page_lru_base_type(page);
> > -
> >  			VM_BUG_ON_PAGE(PageActive(page), page);
> >  			ClearPageUnevictable(page);
> >  			del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
> > -			add_page_to_lru_list(page, lruvec, lru);
> > +			add_page_to_lru_list(page, lruvec);
> >  			pgrescued += nr_pages;
> >  		}
> >  		SetPageLRU(page);
> > 
>
Matthew Wilcox Jan. 26, 2021, 10:01 p.m. UTC | #3
On Fri, Jan 22, 2021 at 03:05:53PM -0700, Yu Zhao wrote:
> +++ b/mm/swap.c
> @@ -231,7 +231,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
>  	if (!PageUnevictable(page)) {
>  		del_page_from_lru_list(page, lruvec, page_lru(page));
>  		ClearPageActive(page);
> -		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
> +		add_page_to_lru_list_tail(page, lruvec);
>  		__count_vm_events(PGROTATED, thp_nr_pages(page));
>  	}

Is it profitable to do ...

-		del_page_from_lru_list(page, lruvec, page_lru(page));
+		enum lru_list lru = page_lru(page);
+		del_page_from_lru_list(page, lruvec, lru);
		ClearPageActive(page);
-		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
+		lru &= ~LRU_ACTIVE;
+		add_page_to_lru_list_tail(page, lruvec, lru);
Yu Zhao Jan. 26, 2021, 10:14 p.m. UTC | #4
On Tue, Jan 26, 2021 at 10:01:11PM +0000, Matthew Wilcox wrote:
> On Fri, Jan 22, 2021 at 03:05:53PM -0700, Yu Zhao wrote:
> > +++ b/mm/swap.c
> > @@ -231,7 +231,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
> >  	if (!PageUnevictable(page)) {
> >  		del_page_from_lru_list(page, lruvec, page_lru(page));
> >  		ClearPageActive(page);
> > -		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
> > +		add_page_to_lru_list_tail(page, lruvec);
> >  		__count_vm_events(PGROTATED, thp_nr_pages(page));
> >  	}
> 
> Is it profitable to do ...
> 
> -		del_page_from_lru_list(page, lruvec, page_lru(page));
> +		enum lru_list lru = page_lru(page);
> +		del_page_from_lru_list(page, lruvec, lru);
> 		ClearPageActive(page);
> -		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
> +		lru &= ~LRU_ACTIVE;
> +		add_page_to_lru_list_tail(page, lruvec, lru);

Ok, now we want to trade readability for size. Sure, I'll see how
much we could squeeze.
Vlastimil Babka Jan. 27, 2021, 10:51 a.m. UTC | #5
On 1/26/21 10:34 PM, Yu Zhao wrote:
> On Tue, Jan 26, 2021 at 08:13:11PM +0100, Vlastimil Babka wrote:
>> On 1/22/21 11:05 PM, Yu Zhao wrote:
>> > The "enum lru_list" parameter to add_page_to_lru_list() and
>> > add_page_to_lru_list_tail() is redundant in the sense that it can
>> > be extracted from the "struct page" parameter by page_lru().
>> 
>> Okay, however, it means repeated extraction of a value that we already knew. The
>> result of compilation is rather sad. This is bloat-o-meter on mm/built-in.a
>> (without CONFIG_DEBUG_VM, btw) between patch 2 and 5:
> 
> Thanks for noticing this, Vlastimil. Should I drop the rest of the
> series except the first patch?

I didn't check how 6-10 look (and if they are still applicable without 3-5);
this was just between 2 and 5.
Andrew Morton Feb. 23, 2021, 10:50 p.m. UTC | #6
On Tue, 26 Jan 2021 15:14:38 -0700 Yu Zhao <yuzhao@google.com> wrote:

> On Tue, Jan 26, 2021 at 10:01:11PM +0000, Matthew Wilcox wrote:
> > On Fri, Jan 22, 2021 at 03:05:53PM -0700, Yu Zhao wrote:
> > > +++ b/mm/swap.c
> > > @@ -231,7 +231,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
> > >  	if (!PageUnevictable(page)) {
> > >  		del_page_from_lru_list(page, lruvec, page_lru(page));
> > >  		ClearPageActive(page);
> > > -		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
> > > +		add_page_to_lru_list_tail(page, lruvec);
> > >  		__count_vm_events(PGROTATED, thp_nr_pages(page));
> > >  	}
> > 
> > Is it profitable to do ...
> > 
> > -		del_page_from_lru_list(page, lruvec, page_lru(page));
> > +		enum lru_list lru = page_lru(page);
> > +		del_page_from_lru_list(page, lruvec, lru);
> > 		ClearPageActive(page);
> > -		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
> > +		lru &= ~LRU_ACTIVE;
> > +		add_page_to_lru_list_tail(page, lruvec, lru);
> 
> Ok, now we want to trade readability for size. Sure, I'll see how
> much we could squeeze.

As nothing has happened here and the code bloat issue remains, I'll
hold this series out of 5.12-rc1.
Yu Zhao Feb. 24, 2021, 5:29 a.m. UTC | #7
On Tue, Feb 23, 2021 at 02:50:11PM -0800, Andrew Morton wrote:
> On Tue, 26 Jan 2021 15:14:38 -0700 Yu Zhao <yuzhao@google.com> wrote:
> 
> > On Tue, Jan 26, 2021 at 10:01:11PM +0000, Matthew Wilcox wrote:
> > > On Fri, Jan 22, 2021 at 03:05:53PM -0700, Yu Zhao wrote:
> > > > +++ b/mm/swap.c
> > > > @@ -231,7 +231,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
> > > >  	if (!PageUnevictable(page)) {
> > > >  		del_page_from_lru_list(page, lruvec, page_lru(page));
> > > >  		ClearPageActive(page);
> > > > -		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
> > > > +		add_page_to_lru_list_tail(page, lruvec);
> > > >  		__count_vm_events(PGROTATED, thp_nr_pages(page));
> > > >  	}
> > > 
> > > Is it profitable to do ...
> > > 
> > > -		del_page_from_lru_list(page, lruvec, page_lru(page));
> > > +		enum lru_list lru = page_lru(page);
> > > +		del_page_from_lru_list(page, lruvec, lru);
> > > 		ClearPageActive(page);
> > > -		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
> > > +		lru &= ~LRU_ACTIVE;
> > > +		add_page_to_lru_list_tail(page, lruvec, lru);
> > 
> > Ok, now we want to trade readability for size. Sure, I'll see how
> > much we could squeeze.
> 
> As nothing has happened here and the code bloat issue remains, I'll
> hold this series out of 5.12-rc1.

Sorry for the slow response. I was trying to ascertain why
page_lru(), a tiny helper, could bloat vmlinux by O(KB). It turned out
compound_head() included in Page{Active,Unevictable} is a nuisance in
our case. Testing PG_{active,unevictable} against
compound_head(page)->flags is really unnecessary because all lru
operations are eventually done on page->lru not
compound_head(page)->lru. With the following change, which sacrifices
the readability a bit, we gain 998 bytes with Clang but lose 227 bytes
with GCC, which IMO is a win. (We use Clang by default.)


diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 355ea1ee32bd..ec0878a3cdfe 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -46,14 +46,12 @@ static __always_inline void __clear_page_lru_flags(struct page *page)
 {
 	VM_BUG_ON_PAGE(!PageLRU(page), page);
 
-	__ClearPageLRU(page);
-
 	/* this shouldn't happen, so leave the flags to bad_page() */
-	if (PageActive(page) && PageUnevictable(page))
+	if ((page->flags & (BIT(PG_active) | BIT(PG_unevictable))) ==
+	    (BIT(PG_active) | BIT(PG_unevictable)))
 		return;
 
-	__ClearPageActive(page);
-	__ClearPageUnevictable(page);
+	page->flags &= ~(BIT(PG_lru) | BIT(PG_active) | BIT(PG_unevictable));
 }
 
 /**
@@ -65,18 +63,12 @@ static __always_inline void __clear_page_lru_flags(struct page *page)
  */
 static __always_inline enum lru_list page_lru(struct page *page)
 {
-	enum lru_list lru;
+	unsigned long flags = READ_ONCE(page->flags);
 
 	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
 
-	if (PageUnevictable(page))
-		return LRU_UNEVICTABLE;
-
-	lru = page_is_file_lru(page) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
-	if (PageActive(page))
-		lru += LRU_ACTIVE;
-
-	return lru;
+	return (flags & BIT(PG_unevictable)) ? LRU_UNEVICTABLE :
+	       (LRU_FILE * !(flags & BIT(PG_swapbacked)) + !!(flags & BIT(PG_active)));
 }
 
 static __always_inline void add_page_to_lru_list(struct page *page,
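The branchless expression is easy to sanity-check outside the kernel. Below
is a minimal userspace harness (hypothetical, with made-up bit positions,
since only the relative values of the lru_list enum matter) asserting that it
agrees with the old branchy logic for all eight flag combinations:

#include <assert.h>

#define BIT(n)		(1UL << (n))
/* Stand-in bit positions; the real ones live in include/linux/page-flags.h. */
#define PG_active	0
#define PG_unevictable	1
#define PG_swapbacked	2

#define LRU_ACTIVE	1
#define LRU_FILE	2

enum lru_list {
	LRU_INACTIVE_ANON,
	LRU_ACTIVE_ANON,
	LRU_INACTIVE_FILE,
	LRU_ACTIVE_FILE,
	LRU_UNEVICTABLE,
};

/* The old, branchy logic. */
static enum lru_list old_lru(unsigned long flags)
{
	enum lru_list lru;

	if (flags & BIT(PG_unevictable))
		return LRU_UNEVICTABLE;
	lru = (flags & BIT(PG_swapbacked)) ? LRU_INACTIVE_ANON : LRU_INACTIVE_FILE;
	if (flags & BIT(PG_active))
		lru += LRU_ACTIVE;
	return lru;
}

/* The proposed replacement. */
static enum lru_list new_lru(unsigned long flags)
{
	return (flags & BIT(PG_unevictable)) ? LRU_UNEVICTABLE :
	       (LRU_FILE * !(flags & BIT(PG_swapbacked)) +
		!!(flags & BIT(PG_active)));
}

int main(void)
{
	unsigned long flags;

	for (flags = 0; flags < 8; flags++)
		assert(old_lru(flags) == new_lru(flags));
	return 0;
}

The two agree everywhere; the kernel version of the new helper additionally
reads page->flags once and never goes through compound_head(), which is where
the savings quoted below come from.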


I'll post this as a separate patch. Below is the bloat-o-meter output
collected on top of c03c21ba6f4e.

$ ./scripts/bloat-o-meter ../vmlinux.clang.orig ../vmlinux.clang
add/remove: 0/1 grow/shrink: 7/10 up/down: 191/-1189 (-998)
Function                                     old     new   delta
lru_lazyfree_fn                              848     893     +45
lru_deactivate_file_fn                      1037    1075     +38
perf_trace_mm_lru_insertion                  515     548     +33
check_move_unevictable_pages                 983    1006     +23
__activate_page                              706     729     +23
trace_event_raw_event_mm_lru_insertion       476     497     +21
lru_deactivate_fn                            691     699      +8
__bpf_trace_mm_lru_insertion                  13      11      -2
__traceiter_mm_lru_insertion                  67      62      -5
move_pages_to_lru                            964     881     -83
__pagevec_lru_add_fn                         665     581     -84
isolate_lru_page                             524     419    -105
__munlock_pagevec                           1609    1481    -128
isolate_migratepages_block                  3370    3237    -133
__page_cache_release                         556     413    -143
lruvec_lru_size                              151       -    -151
release_pages                               1025     866    -159
pagevec_move_tail_fn                         805     609    -196
Total: Before=19502982, After=19501984, chg -0.01%

$ ./scripts/bloat-o-meter ../vmlinux.gcc.orig ../vmlinux.gcc
add/remove: 0/1 grow/shrink: 9/9 up/down: 1010/-783 (227)
Function                                     old     new   delta
shrink_lruvec                               1690    1950    +260
lru_deactivate_file_fn                       961    1128    +167
isolate_migratepages_block                  3286    3427    +141
check_move_unevictable_pages                1042    1170    +128
lru_lazyfree_fn                              709     822    +113
lru_deactivate_fn                            665     724     +59
__activate_page                              703     760     +57
trace_event_raw_event_mm_lru_insertion       432     478     +46
perf_trace_mm_lru_insertion                  464     503     +39
__bpf_trace_mm_lru_insertion                  13      11      -2
__traceiter_mm_lru_insertion                  66      57      -9
isolate_lru_page                             472     405     -67
__munlock_pagevec                           1282    1212     -70
__pagevec_lru_add                            976     893     -83
__page_cache_release                         508     418     -90
release_pages                                978     887     -91
move_pages_to_lru                            954     853    -101
lruvec_lru_size                              131       -    -131
pagevec_move_tail_fn                         770     631    -139
Total: Before=19237248, After=19237475, chg +0.00%
Alex Shi Feb. 24, 2021, 8:06 a.m. UTC | #8
On 2021/2/24 1:29 PM, Yu Zhao wrote:
> On Tue, Feb 23, 2021 at 02:50:11PM -0800, Andrew Morton wrote:
>> On Tue, 26 Jan 2021 15:14:38 -0700 Yu Zhao <yuzhao@google.com> wrote:
>>
>>> On Tue, Jan 26, 2021 at 10:01:11PM +0000, Matthew Wilcox wrote:
>>>> On Fri, Jan 22, 2021 at 03:05:53PM -0700, Yu Zhao wrote:
>>>>> +++ b/mm/swap.c
>>>>> @@ -231,7 +231,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
>>>>>  	if (!PageUnevictable(page)) {
>>>>>  		del_page_from_lru_list(page, lruvec, page_lru(page));
>>>>>  		ClearPageActive(page);
>>>>> -		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
>>>>> +		add_page_to_lru_list_tail(page, lruvec);
>>>>>  		__count_vm_events(PGROTATED, thp_nr_pages(page));
>>>>>  	}
>>>>
>>>> Is it profitable to do ...
>>>>
>>>> -		del_page_from_lru_list(page, lruvec, page_lru(page));
>>>> +		enum lru_list lru = page_lru(page);
>>>> +		del_page_from_lru_list(page, lruvec, lru);
>>>> 		ClearPageActive(page);
>>>> -		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
>>>> +		lru &= ~LRU_ACTIVE;
>>>> +		add_page_to_lru_list_tail(page, lruvec, lru);
>>>
>>> Ok, now we want to trade readability for size. Sure, I'll see how
>>> much we could squeeze.
>>
>> As nothing has happened here and the code bloat issue remains, I'll
>> hold this series out of 5.12-rc1.
> 
> Sorry for the slow response. I was trying to ascertain why
> page_lru(), a tiny helper, could bloat vmlinux by O(KB). It turned out
> compound_head() included in Page{Active,Unevictable} is a nuisance in
> our case. Testing PG_{active,unevictable} against
> compound_head(page)->flags is really unnecessary because all lru
> operations are eventually done on page->lru not
> compound_head(page)->lru. With the following change, which sacrifices
> the readability a bit, we gain 998 bytes with Clang but lose 227 bytes
> with GCC, which IMO is a win. (We use Clang by default.)
> 
> 
> diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
> index 355ea1ee32bd..ec0878a3cdfe 100644
> --- a/include/linux/mm_inline.h
> +++ b/include/linux/mm_inline.h
> @@ -46,14 +46,12 @@ static __always_inline void __clear_page_lru_flags(struct page *page)
>  {
>  	VM_BUG_ON_PAGE(!PageLRU(page), page);
>  
> -	__ClearPageLRU(page);
> -
>  	/* this shouldn't happen, so leave the flags to bad_page() */
> -	if (PageActive(page) && PageUnevictable(page))
> +	if ((page->flags & (BIT(PG_active) | BIT(PG_unevictable))) ==
> +	    (BIT(PG_active) | BIT(PG_unevictable)))
>  		return;
>  
> -	__ClearPageActive(page);
> -	__ClearPageUnevictable(page);
> +	page->flags &= ~(BIT(PG_lru) | BIT(PG_active) | BIT(PG_unevictable));
>  }
>  
>  /**
> @@ -65,18 +63,12 @@ static __always_inline void __clear_page_lru_flags(struct page *page)
>   */
>  static __always_inline enum lru_list page_lru(struct page *page)
>  {
> -	enum lru_list lru;
> +	unsigned long flags = READ_ONCE(page->flags);
>  
>  	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
>  
> -	if (PageUnevictable(page))
> -		return LRU_UNEVICTABLE;
> -
> -	lru = page_is_file_lru(page) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
> -	if (PageActive(page))
> -		lru += LRU_ACTIVE;
> -
> -	return lru;
> +	return (flags & BIT(PG_unevictable)) ? LRU_UNEVICTABLE :
> +	       (LRU_FILE * !(flags & BIT(PG_swapbacked)) + !!(flags & BIT(PG_active)));

Currently each page flag uses a different flag policy; does this mean the above flags could be
changed to the PF_ANY policy?

Thanks
Alex

>  }
>  
>  static __always_inline void add_page_to_lru_list(struct page *page,
> 
> 
> I'll post this as a separate patch. Below is the bloat-o-meter output
> collected on top of c03c21ba6f4e.
> 
> $ ./scripts/bloat-o-meter ../vmlinux.clang.orig ../vmlinux.clang
> add/remove: 0/1 grow/shrink: 7/10 up/down: 191/-1189 (-998)
> Function                                     old     new   delta
> lru_lazyfree_fn                              848     893     +45
> lru_deactivate_file_fn                      1037    1075     +38
> perf_trace_mm_lru_insertion                  515     548     +33
> check_move_unevictable_pages                 983    1006     +23
> __activate_page                              706     729     +23
> trace_event_raw_event_mm_lru_insertion       476     497     +21
> lru_deactivate_fn                            691     699      +8
> __bpf_trace_mm_lru_insertion                  13      11      -2
> __traceiter_mm_lru_insertion                  67      62      -5
> move_pages_to_lru                            964     881     -83
> __pagevec_lru_add_fn                         665     581     -84
> isolate_lru_page                             524     419    -105
> __munlock_pagevec                           1609    1481    -128
> isolate_migratepages_block                  3370    3237    -133
> __page_cache_release                         556     413    -143
> lruvec_lru_size                              151       -    -151
> release_pages                               1025     866    -159
> pagevec_move_tail_fn                         805     609    -196
> Total: Before=19502982, After=19501984, chg -0.01%
> 
> $ ./scripts/bloat-o-meter ../vmlinux.gcc.orig ../vmlinux.gcc
> add/remove: 0/1 grow/shrink: 9/9 up/down: 1010/-783 (227)
> Function                                     old     new   delta
> shrink_lruvec                               1690    1950    +260
> lru_deactivate_file_fn                       961    1128    +167
> isolate_migratepages_block                  3286    3427    +141
> check_move_unevictable_pages                1042    1170    +128
> lru_lazyfree_fn                              709     822    +113
> lru_deactivate_fn                            665     724     +59
> __activate_page                              703     760     +57
> trace_event_raw_event_mm_lru_insertion       432     478     +46
> perf_trace_mm_lru_insertion                  464     503     +39
> __bpf_trace_mm_lru_insertion                  13      11      -2
> __traceiter_mm_lru_insertion                  66      57      -9
> isolate_lru_page                             472     405     -67
> __munlock_pagevec                           1282    1212     -70
> __pagevec_lru_add                            976     893     -83
> __page_cache_release                         508     418     -90
> release_pages                                978     887     -91
> move_pages_to_lru                            954     853    -101
> lruvec_lru_size                              131       -    -131
> pagevec_move_tail_fn                         770     631    -139
> Total: Before=19237248, After=19237475, chg +0.00%
>
Yu Zhao Feb. 24, 2021, 8:37 a.m. UTC | #9
On Wed, Feb 24, 2021 at 04:06:45PM +0800, Alex Shi wrote:
> 
> 
> On 2021/2/24 1:29 PM, Yu Zhao wrote:
> > On Tue, Feb 23, 2021 at 02:50:11PM -0800, Andrew Morton wrote:
> >> On Tue, 26 Jan 2021 15:14:38 -0700 Yu Zhao <yuzhao@google.com> wrote:
> >>
> >>> On Tue, Jan 26, 2021 at 10:01:11PM +0000, Matthew Wilcox wrote:
> >>>> On Fri, Jan 22, 2021 at 03:05:53PM -0700, Yu Zhao wrote:
> >>>>> +++ b/mm/swap.c
> >>>>> @@ -231,7 +231,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
> >>>>>  	if (!PageUnevictable(page)) {
> >>>>>  		del_page_from_lru_list(page, lruvec, page_lru(page));
> >>>>>  		ClearPageActive(page);
> >>>>> -		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
> >>>>> +		add_page_to_lru_list_tail(page, lruvec);
> >>>>>  		__count_vm_events(PGROTATED, thp_nr_pages(page));
> >>>>>  	}
> >>>>
> >>>> Is it profitable to do ...
> >>>>
> >>>> -		del_page_from_lru_list(page, lruvec, page_lru(page));
> >>>> +		enum lru_list lru = page_lru(page);
> >>>> +		del_page_from_lru_list(page, lruvec, lru);
> >>>> 		ClearPageActive(page);
> >>>> -		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
> >>>> +		lru &= ~LRU_ACTIVE;
> >>>> +		add_page_to_lru_list_tail(page, lruvec, lru);
> >>>
> >>> Ok, now we want to trade readability for size. Sure, I'll see how
> >>> much we could squeeze.
> >>
> >> As nothing has happened here and the code bloat issue remains, I'll
> >> hold this series out of 5.12-rc1.
> > 
> > Sorry for the slow response. I was trying to ascertain why
> > page_lru(), a tiny helper, could bloat vmlinux by O(KB). It turned out
> > compound_head() included in Page{Active,Unevictable} is a nuisance in
> > our case. Testing PG_{active,unevictable} against
> > compound_head(page)->flags is really unnecessary because all lru
> > operations are eventually done on page->lru not
> > compound_head(page)->lru. With the following change, which sacrifices
> > the readability a bit, we gain 998 bytes with Clang but lose 227 bytes
> > with GCC, which IMO is a win. (We use Clang by default.)
> > 
> > 
> > diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
> > index 355ea1ee32bd..ec0878a3cdfe 100644
> > --- a/include/linux/mm_inline.h
> > +++ b/include/linux/mm_inline.h
> > @@ -46,14 +46,12 @@ static __always_inline void __clear_page_lru_flags(struct page *page)
> >  {
> >  	VM_BUG_ON_PAGE(!PageLRU(page), page);
> >  
> > -	__ClearPageLRU(page);
> > -
> >  	/* this shouldn't happen, so leave the flags to bad_page() */
> > -	if (PageActive(page) && PageUnevictable(page))
> > +	if ((page->flags & (BIT(PG_active) | BIT(PG_unevictable))) ==
> > +	    (BIT(PG_active) | BIT(PG_unevictable)))
> >  		return;
> >  
> > -	__ClearPageActive(page);
> > -	__ClearPageUnevictable(page);
> > +	page->flags &= ~(BIT(PG_lru) | BIT(PG_active) | BIT(PG_unevictable));
> >  }
> >  
> >  /**
> > @@ -65,18 +63,12 @@ static __always_inline void __clear_page_lru_flags(struct page *page)
> >   */
> >  static __always_inline enum lru_list page_lru(struct page *page)
> >  {
> > -	enum lru_list lru;
> > +	unsigned long flags = READ_ONCE(page->flags);
> >  
> >  	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
> >  
> > -	if (PageUnevictable(page))
> > -		return LRU_UNEVICTABLE;
> > -
> > -	lru = page_is_file_lru(page) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
> > -	if (PageActive(page))
> > -		lru += LRU_ACTIVE;
> > -
> > -	return lru;
> > +	return (flags & BIT(PG_unevictable)) ? LRU_UNEVICTABLE :
> > +	       (LRU_FILE * !(flags & BIT(PG_swapbacked)) + !!(flags & BIT(PG_active)));
> 
> Currently each page flag uses a different flag policy; does this mean the above flags could be
> changed to the PF_ANY policy?

That's a good question. Semantically, no because
PG_{active,unevictable} only apply to head pages. But practically,
I think the answer is yes, and the only place that needs to
explicitly call compound_head() is gather_stats() in
fs/proc/task_mmu.c, IIRC.
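For context, the flag policies being discussed are defined in
include/linux/page-flags.h (excerpted roughly as of this era of the kernel;
check the exact tree before relying on it). PF_HEAD routes every test through
compound_head(), the indirection discussed above, while PF_ANY operates on
the page as given:

#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))

/* Both flags in question currently use the PF_HEAD policy: */
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)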

> 
> Thanks
> Alex
> 
> >  }
> >  
> >  static __always_inline void add_page_to_lru_list(struct page *page,
> > 
> > 
> > I'll post this as a separate patch. Below is the bloat-o-meter output
> > collected on top of c03c21ba6f4e.
> > 
> > $ ./scripts/bloat-o-meter ../vmlinux.clang.orig ../vmlinux.clang
> > add/remove: 0/1 grow/shrink: 7/10 up/down: 191/-1189 (-998)
> > Function                                     old     new   delta
> > lru_lazyfree_fn                              848     893     +45
> > lru_deactivate_file_fn                      1037    1075     +38
> > perf_trace_mm_lru_insertion                  515     548     +33
> > check_move_unevictable_pages                 983    1006     +23
> > __activate_page                              706     729     +23
> > trace_event_raw_event_mm_lru_insertion       476     497     +21
> > lru_deactivate_fn                            691     699      +8
> > __bpf_trace_mm_lru_insertion                  13      11      -2
> > __traceiter_mm_lru_insertion                  67      62      -5
> > move_pages_to_lru                            964     881     -83
> > __pagevec_lru_add_fn                         665     581     -84
> > isolate_lru_page                             524     419    -105
> > __munlock_pagevec                           1609    1481    -128
> > isolate_migratepages_block                  3370    3237    -133
> > __page_cache_release                         556     413    -143
> > lruvec_lru_size                              151       -    -151
> > release_pages                               1025     866    -159
> > pagevec_move_tail_fn                         805     609    -196
> > Total: Before=19502982, After=19501984, chg -0.01%
> > 
> > $ ./scripts/bloat-o-meter ../vmlinux.gcc.orig ../vmlinux.gcc
> > add/remove: 0/1 grow/shrink: 9/9 up/down: 1010/-783 (227)
> > Function                                     old     new   delta
> > shrink_lruvec                               1690    1950    +260
> > lru_deactivate_file_fn                       961    1128    +167
> > isolate_migratepages_block                  3286    3427    +141
> > check_move_unevictable_pages                1042    1170    +128
> > lru_lazyfree_fn                              709     822    +113
> > lru_deactivate_fn                            665     724     +59
> > __activate_page                              703     760     +57
> > trace_event_raw_event_mm_lru_insertion       432     478     +46
> > perf_trace_mm_lru_insertion                  464     503     +39
> > __bpf_trace_mm_lru_insertion                  13      11      -2
> > __traceiter_mm_lru_insertion                  66      57      -9
> > isolate_lru_page                             472     405     -67
> > __munlock_pagevec                           1282    1212     -70
> > __pagevec_lru_add                            976     893     -83
> > __page_cache_release                         508     418     -90
> > release_pages                                978     887     -91
> > move_pages_to_lru                            954     853    -101
> > lruvec_lru_size                              131       -    -131
> > pagevec_move_tail_fn                         770     631    -139
> > Total: Before=19237248, After=19237475, chg +0.00%
> >
Alex Shi Feb. 24, 2021, 9:01 a.m. UTC | #10
On 2021/2/24 4:37 PM, Yu Zhao wrote:
>>> @@ -65,18 +63,12 @@ static __always_inline void __clear_page_lru_flags(struct page *page)
>>>   */
>>>  static __always_inline enum lru_list page_lru(struct page *page)
>>>  {
>>> -	enum lru_list lru;
>>> +	unsigned long flags = READ_ONCE(page->flags);
>>>  
>>>  	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
>>>  
>>> -	if (PageUnevictable(page))
>>> -		return LRU_UNEVICTABLE;
>>> -
>>> -	lru = page_is_file_lru(page) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
>>> -	if (PageActive(page))
>>> -		lru += LRU_ACTIVE;
>>> -
>>> -	return lru;
>>> +	return (flags & BIT(PG_unevictable)) ? LRU_UNEVICTABLE :
>>> +	       (LRU_FILE * !(flags & BIT(PG_swapbacked)) + !!(flags & BIT(PG_active)));
>> Currently each page flag uses a different flag policy; does this mean the above flags could be
>> changed to the PF_ANY policy?
> That's a good question. Semantically, no because
> PG_{active,unevictable} only apply to head pages. But practically,
> I think the answer is yes, and the only place that needs to
> explicitly call compound_head() is gather_stats() in
> fs/proc/task_mmu.c, IIRC.
> 


A quick test, per your request:

# ll vmlinux vmlinux.new
-rwxr-xr-x 1 root root 62245304 Feb 24 16:57 vmlinux
-rwxr-xr-x 1 root root 62245280 Feb 24 16:55 vmlinux.new
# gcc --version
gcc (GCC) 8.3.1 20190311 (Red Hat 8.3.1-3)
Copyright (C) 2018 Free Software Foundation, Inc.
This is free software; see the source for copying conditions.  There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

# scripts/bloat-o-meter vmlinux vmlinux.new
add/remove: 0/0 grow/shrink: 1/15 up/down: 1/-2008 (-2007)
Function                                     old     new   delta
vermagic                                      37      38      +1
trace_event_raw_event_mm_lru_insertion       471     418     -53
perf_trace_mm_lru_insertion                  526     473     -53
__munlock_pagevec                           1134    1069     -65
isolate_migratepages_block                  2623    2547     -76
isolate_lru_page                             384     303     -81
__pagevec_lru_add                            753     652    -101
release_pages                                780     667    -113
__page_cache_release                         429     276    -153
move_pages_to_lru                            871     702    -169
lru_lazyfree_fn                              712     539    -173
check_move_unevictable_pages                 938     763    -175
__activate_page                              665     488    -177
lru_deactivate_fn                            636     452    -184
pagevec_move_tail_fn                         597     411    -186
lru_deactivate_file_fn                      1000     751    -249
Total: Before=17029652, After=17027645, chg -0.01%

Patch

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 2889741f450a..130ba3201d3f 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -106,15 +106,19 @@  static __always_inline enum lru_list page_lru(struct page *page)
 }
 
 static __always_inline void add_page_to_lru_list(struct page *page,
-				struct lruvec *lruvec, enum lru_list lru)
+				struct lruvec *lruvec)
 {
+	enum lru_list lru = page_lru(page);
+
 	update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
 	list_add(&page->lru, &lruvec->lists[lru]);
 }
 
 static __always_inline void add_page_to_lru_list_tail(struct page *page,
-				struct lruvec *lruvec, enum lru_list lru)
+				struct lruvec *lruvec)
 {
+	enum lru_list lru = page_lru(page);
+
 	update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
 	list_add_tail(&page->lru, &lruvec->lists[lru]);
 }
diff --git a/mm/swap.c b/mm/swap.c
index 490553f3f9ef..4b058ef37add 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -231,7 +231,7 @@  static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
 	if (!PageUnevictable(page)) {
 		del_page_from_lru_list(page, lruvec, page_lru(page));
 		ClearPageActive(page);
-		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
+		add_page_to_lru_list_tail(page, lruvec);
 		__count_vm_events(PGROTATED, thp_nr_pages(page));
 	}
 }
@@ -313,8 +313,7 @@  static void __activate_page(struct page *page, struct lruvec *lruvec)
 
 		del_page_from_lru_list(page, lruvec, lru);
 		SetPageActive(page);
-		lru += LRU_ACTIVE;
-		add_page_to_lru_list(page, lruvec, lru);
+		add_page_to_lru_list(page, lruvec);
 		trace_mm_lru_activate(page);
 
 		__count_vm_events(PGACTIVATE, nr_pages);
@@ -543,14 +542,14 @@  static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
 		 * It can make readahead confusing.  But race window
 		 * is _really_ small and  it's non-critical problem.
 		 */
-		add_page_to_lru_list(page, lruvec, lru);
+		add_page_to_lru_list(page, lruvec);
 		SetPageReclaim(page);
 	} else {
 		/*
 		 * The page's writeback ends up during pagevec
 		 * We moves tha page into tail of inactive.
 		 */
-		add_page_to_lru_list_tail(page, lruvec, lru);
+		add_page_to_lru_list_tail(page, lruvec);
 		__count_vm_events(PGROTATED, nr_pages);
 	}
 
@@ -570,7 +569,7 @@  static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
 		del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
 		ClearPageActive(page);
 		ClearPageReferenced(page);
-		add_page_to_lru_list(page, lruvec, lru);
+		add_page_to_lru_list(page, lruvec);
 
 		__count_vm_events(PGDEACTIVATE, nr_pages);
 		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
@@ -595,7 +594,7 @@  static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec)
 		 * anonymous pages
 		 */
 		ClearPageSwapBacked(page);
-		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
+		add_page_to_lru_list(page, lruvec);
 
 		__count_vm_events(PGLAZYFREE, nr_pages);
 		__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
@@ -1005,7 +1004,7 @@  static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
 			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
 	}
 
-	add_page_to_lru_list(page, lruvec, lru);
+	add_page_to_lru_list(page, lruvec);
 	trace_mm_lru_insertion(page, lru);
 }
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 19875660e8f8..09e4f97488c9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1867,7 +1867,7 @@  static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
 		 * inhibits memcg migration).
 		 */
 		VM_BUG_ON_PAGE(!lruvec_holds_page_lru_lock(page, lruvec), page);
-		add_page_to_lru_list(page, lruvec, page_lru(page));
+		add_page_to_lru_list(page, lruvec);
 		nr_pages = thp_nr_pages(page);
 		nr_moved += nr_pages;
 		if (PageActive(page))
@@ -4282,12 +4282,10 @@  void check_move_unevictable_pages(struct pagevec *pvec)
 
 		lruvec = relock_page_lruvec_irq(page, lruvec);
 		if (page_evictable(page) && PageUnevictable(page)) {
-			enum lru_list lru = page_lru_base_type(page);
-
 			VM_BUG_ON_PAGE(PageActive(page), page);
 			ClearPageUnevictable(page);
 			del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
-			add_page_to_lru_list(page, lruvec, lru);
+			add_page_to_lru_list(page, lruvec);
 			pgrescued += nr_pages;
 		}
 		SetPageLRU(page);