
[v16,13/22] mm/lru: introduce TestClearPageLRU

Message ID 1594429136-20002-14-git-send-email-alex.shi@linux.alibaba.com (mailing list archive)
State New, archived
Headers show
Series per memcg lru_lock | expand

Commit Message

Alex Shi July 11, 2020, 12:58 a.m. UTC
Combine the PageLRU check and ClearPageLRU into a single function, the
newly introduced TestClearPageLRU. This function will be used as a page
isolation precondition to prevent concurrent isolations elsewhere.
As a consequence, non-PageLRU pages may now appear on an lru list, so the
corresponding BUG checks need to be removed.
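
A condensed before/after sketch of the isolation pattern, taken from the
isolate_lru_page() hunk in the diff below (simplified; the lruvec lookup
and the recheck under the lock are omitted):

	/* before: test and clear are separate steps, both under lru_lock */
	spin_lock_irq(&pgdat->lru_lock);
	if (PageLRU(page)) {
		get_page(page);
		ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_lru(page));
	}
	spin_unlock_irq(&pgdat->lru_lock);

	/* after: the atomic test-and-clear itself is the isolation precondition */
	if (TestClearPageLRU(page)) {
		get_page(page);
		spin_lock_irq(&pgdat->lru_lock);
		del_page_from_lru_list(page, lruvec, page_lru(page));
		spin_unlock_irq(&pgdat->lru_lock);
	}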

Hugh Dickins pointed out that __page_cache_release and release_pages
have no need for an atomic clear-bit operation, since no one else holds
a reference to the page at that moment. Likewise, there is no need for
get_page() before clearing the lru bit in isolate_lru_page, since that
function '(1) Must be called with an elevated refcount on the page'.

As Andrew Morton mentioned, this change dirties the cacheline even for a
page that isn't on the LRU. But the cost is acceptable according to Rong
Chen's <rong.a.chen@intel.com> report:
https://lkml.org/lkml/2020/3/4/173

Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Cc: cgroups@vger.kernel.org
Cc: linux-mm@kvack.org
---
 include/linux/page-flags.h |  1 +
 mm/mlock.c                 |  3 +--
 mm/swap.c                  |  6 ++----
 mm/vmscan.c                | 26 +++++++++++---------------
 4 files changed, 15 insertions(+), 21 deletions(-)

Comments

Alex Shi July 16, 2020, 9:06 a.m. UTC | #1
Hi Johannes,

The patchset looks good from both the logic and testing standpoints. Do you
have any concerns about any of the patches?

Thanks
Alex

On 2020/7/11 8:58 AM, Alex Shi wrote:
> Combine PageLRU check and ClearPageLRU into a function by new
> introduced func TestClearPageLRU. This function will be used as page
> isolation precondition to prevent other isolations some where else.
> Then there are may non PageLRU page on lru list, need to remove BUG
> checking accordingly.
> 
> Hugh Dickins pointed that __page_cache_release and release_pages
> has no need to do atomic clear bit since no user on the page at that
> moment. and no need get_page() before lru bit clear in isolate_lru_page,
> since it '(1) Must be called with an elevated refcount on the page'.
> 
> As Andrew Morton mentioned this change would dirty cacheline for page
> isn't on LRU. But the lost would be acceptable with Rong Chen
> <rong.a.chen@intel.com> report:
> https://lkml.org/lkml/2020/3/4/173
> 
> Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
> Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
> Cc: Hugh Dickins <hughd@google.com>
> Cc: Johannes Weiner <hannes@cmpxchg.org>
> Cc: Michal Hocko <mhocko@kernel.org>
> Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: linux-kernel@vger.kernel.org
> Cc: cgroups@vger.kernel.org
> Cc: linux-mm@kvack.org
> ---
>  include/linux/page-flags.h |  1 +
>  mm/mlock.c                 |  3 +--
>  mm/swap.c                  |  6 ++----
>  mm/vmscan.c                | 26 +++++++++++---------------
>  4 files changed, 15 insertions(+), 21 deletions(-)
> 
> diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
> index 6be1aa559b1e..9554ed1387dc 100644
> --- a/include/linux/page-flags.h
> +++ b/include/linux/page-flags.h
> @@ -326,6 +326,7 @@ static inline void page_init_poison(struct page *page, size_t size)
>  PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
>  	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
>  PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
> +	TESTCLEARFLAG(LRU, lru, PF_HEAD)
>  PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
>  	TESTCLEARFLAG(Active, active, PF_HEAD)
>  PAGEFLAG(Workingset, workingset, PF_HEAD)
> diff --git a/mm/mlock.c b/mm/mlock.c
> index f8736136fad7..228ba5a8e0a5 100644
> --- a/mm/mlock.c
> +++ b/mm/mlock.c
> @@ -108,13 +108,12 @@ void mlock_vma_page(struct page *page)
>   */
>  static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
>  {
> -	if (PageLRU(page)) {
> +	if (TestClearPageLRU(page)) {
>  		struct lruvec *lruvec;
>  
>  		lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
>  		if (getpage)
>  			get_page(page);
> -		ClearPageLRU(page);
>  		del_page_from_lru_list(page, lruvec, page_lru(page));
>  		return true;
>  	}
> diff --git a/mm/swap.c b/mm/swap.c
> index f645965fde0e..5092fe9c8c47 100644
> --- a/mm/swap.c
> +++ b/mm/swap.c
> @@ -83,10 +83,9 @@ static void __page_cache_release(struct page *page)
>  		struct lruvec *lruvec;
>  		unsigned long flags;
>  
> +		__ClearPageLRU(page);
>  		spin_lock_irqsave(&pgdat->lru_lock, flags);
>  		lruvec = mem_cgroup_page_lruvec(page, pgdat);
> -		VM_BUG_ON_PAGE(!PageLRU(page), page);
> -		__ClearPageLRU(page);
>  		del_page_from_lru_list(page, lruvec, page_off_lru(page));
>  		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
>  	}
> @@ -878,9 +877,8 @@ void release_pages(struct page **pages, int nr)
>  				spin_lock_irqsave(&locked_pgdat->lru_lock, flags);
>  			}
>  
> -			lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
> -			VM_BUG_ON_PAGE(!PageLRU(page), page);
>  			__ClearPageLRU(page);
> +			lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
>  			del_page_from_lru_list(page, lruvec, page_off_lru(page));
>  		}
>  
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index c1c4259b4de5..18986fefd49b 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -1548,16 +1548,16 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
>  {
>  	int ret = -EINVAL;
>  
> -	/* Only take pages on the LRU. */
> -	if (!PageLRU(page))
> -		return ret;
> -
>  	/* Compaction should not handle unevictable pages but CMA can do so */
>  	if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
>  		return ret;
>  
>  	ret = -EBUSY;
>  
> +	/* Only take pages on the LRU. */
> +	if (!PageLRU(page))
> +		return ret;
> +
>  	/*
>  	 * To minimise LRU disruption, the caller can indicate that it only
>  	 * wants to isolate pages it will be able to operate on without
> @@ -1671,8 +1671,6 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
>  		page = lru_to_page(src);
>  		prefetchw_prev_lru_page(page, src, flags);
>  
> -		VM_BUG_ON_PAGE(!PageLRU(page), page);
> -
>  		nr_pages = compound_nr(page);
>  		total_scan += nr_pages;
>  
> @@ -1769,21 +1767,19 @@ int isolate_lru_page(struct page *page)
>  	VM_BUG_ON_PAGE(!page_count(page), page);
>  	WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
>  
> -	if (PageLRU(page)) {
> +	if (TestClearPageLRU(page)) {
>  		pg_data_t *pgdat = page_pgdat(page);
>  		struct lruvec *lruvec;
> +		int lru = page_lru(page);
>  
> -		spin_lock_irq(&pgdat->lru_lock);
> +		get_page(page);
>  		lruvec = mem_cgroup_page_lruvec(page, pgdat);
> -		if (PageLRU(page)) {
> -			int lru = page_lru(page);
> -			get_page(page);
> -			ClearPageLRU(page);
> -			del_page_from_lru_list(page, lruvec, lru);
> -			ret = 0;
> -		}
> +		spin_lock_irq(&pgdat->lru_lock);
> +		del_page_from_lru_list(page, lruvec, lru);
>  		spin_unlock_irq(&pgdat->lru_lock);
> +		ret = 0;
>  	}
> +
>  	return ret;
>  }
>  
>
Alexander Duyck July 16, 2020, 9:12 p.m. UTC | #2
On Fri, Jul 10, 2020 at 5:59 PM Alex Shi <alex.shi@linux.alibaba.com> wrote:
>
> Combine PageLRU check and ClearPageLRU into a function by new
> introduced func TestClearPageLRU. This function will be used as page
> isolation precondition to prevent other isolations some where else.
> Then there are may non PageLRU page on lru list, need to remove BUG
> checking accordingly.
>
> Hugh Dickins pointed that __page_cache_release and release_pages
> has no need to do atomic clear bit since no user on the page at that
> moment. and no need get_page() before lru bit clear in isolate_lru_page,
> since it '(1) Must be called with an elevated refcount on the page'.
>
> As Andrew Morton mentioned this change would dirty cacheline for page
> isn't on LRU. But the lost would be acceptable with Rong Chen
> <rong.a.chen@intel.com> report:
> https://lkml.org/lkml/2020/3/4/173
>
> Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
> Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
> Cc: Hugh Dickins <hughd@google.com>
> Cc: Johannes Weiner <hannes@cmpxchg.org>
> Cc: Michal Hocko <mhocko@kernel.org>
> Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: linux-kernel@vger.kernel.org
> Cc: cgroups@vger.kernel.org
> Cc: linux-mm@kvack.org
> ---
>  include/linux/page-flags.h |  1 +
>  mm/mlock.c                 |  3 +--
>  mm/swap.c                  |  6 ++----
>  mm/vmscan.c                | 26 +++++++++++---------------
>  4 files changed, 15 insertions(+), 21 deletions(-)
>
> diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
> index 6be1aa559b1e..9554ed1387dc 100644
> --- a/include/linux/page-flags.h
> +++ b/include/linux/page-flags.h
> @@ -326,6 +326,7 @@ static inline void page_init_poison(struct page *page, size_t size)
>  PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
>         __CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
>  PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
> +       TESTCLEARFLAG(LRU, lru, PF_HEAD)
>  PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
>         TESTCLEARFLAG(Active, active, PF_HEAD)
>  PAGEFLAG(Workingset, workingset, PF_HEAD)
> diff --git a/mm/mlock.c b/mm/mlock.c
> index f8736136fad7..228ba5a8e0a5 100644
> --- a/mm/mlock.c
> +++ b/mm/mlock.c
> @@ -108,13 +108,12 @@ void mlock_vma_page(struct page *page)
>   */
>  static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
>  {
> -       if (PageLRU(page)) {
> +       if (TestClearPageLRU(page)) {
>                 struct lruvec *lruvec;
>
>                 lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
>                 if (getpage)
>                         get_page(page);
> -               ClearPageLRU(page);
>                 del_page_from_lru_list(page, lruvec, page_lru(page));
>                 return true;
>         }
> diff --git a/mm/swap.c b/mm/swap.c
> index f645965fde0e..5092fe9c8c47 100644
> --- a/mm/swap.c
> +++ b/mm/swap.c
> @@ -83,10 +83,9 @@ static void __page_cache_release(struct page *page)
>                 struct lruvec *lruvec;
>                 unsigned long flags;
>
> +               __ClearPageLRU(page);
>                 spin_lock_irqsave(&pgdat->lru_lock, flags);
>                 lruvec = mem_cgroup_page_lruvec(page, pgdat);
> -               VM_BUG_ON_PAGE(!PageLRU(page), page);
> -               __ClearPageLRU(page);
>                 del_page_from_lru_list(page, lruvec, page_off_lru(page));
>                 spin_unlock_irqrestore(&pgdat->lru_lock, flags);
>         }

So this piece doesn't make much sense to me. Why not use
TestClearPageLRU(page) here? Just a few lines above you are testing
for PageLRU(page), and it seems like if you are going to go for an
atomic test/clear and then remove the page from the LRU list you
should be using it here as well. Otherwise it seems like you could run
into a potential collision, since you are testing here without clearing
the bit.

> @@ -878,9 +877,8 @@ void release_pages(struct page **pages, int nr)
>                                 spin_lock_irqsave(&locked_pgdat->lru_lock, flags);
>                         }
>
> -                       lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
> -                       VM_BUG_ON_PAGE(!PageLRU(page), page);
>                         __ClearPageLRU(page);
> +                       lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
>                         del_page_from_lru_list(page, lruvec, page_off_lru(page));
>                 }
>

Same here. You are just moving the flag clearing, but you didn't
combine it with the test, even though it seems like you are expecting
this to be treated as an atomic operation. It should be relatively low
cost to do since you should already own the cacheline as a result of
calling put_page_testzero, so I am not sure why you are not combining
the two.

> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index c1c4259b4de5..18986fefd49b 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -1548,16 +1548,16 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
>  {
>         int ret = -EINVAL;
>
> -       /* Only take pages on the LRU. */
> -       if (!PageLRU(page))
> -               return ret;
> -
>         /* Compaction should not handle unevictable pages but CMA can do so */
>         if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
>                 return ret;
>
>         ret = -EBUSY;
>
> +       /* Only take pages on the LRU. */
> +       if (!PageLRU(page))
> +               return ret;
> +
>         /*
>          * To minimise LRU disruption, the caller can indicate that it only
>          * wants to isolate pages it will be able to operate on without
> @@ -1671,8 +1671,6 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
>                 page = lru_to_page(src);
>                 prefetchw_prev_lru_page(page, src, flags);
>
> -               VM_BUG_ON_PAGE(!PageLRU(page), page);
> -
>                 nr_pages = compound_nr(page);
>                 total_scan += nr_pages;
>

So effectively the changes here are making it so that a !PageLRU page
will cycle to the start of the LRU list. Now if I understand correctly
we are guaranteed that if the flag is not set it cannot be set while
we are holding the lru_lock, however it can be cleared while we are
holding the lock, correct? Thus that is why isolate_lru_pages has to
call TestClearPageLRU after the earlier check in __isolate_lru_page.

It might make it more readable to pull in the later patch that
modifies isolate_lru_pages that has it using TestClearPageLRU.

> @@ -1769,21 +1767,19 @@ int isolate_lru_page(struct page *page)
>         VM_BUG_ON_PAGE(!page_count(page), page);
>         WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
>
> -       if (PageLRU(page)) {
> +       if (TestClearPageLRU(page)) {
>                 pg_data_t *pgdat = page_pgdat(page);
>                 struct lruvec *lruvec;
> +               int lru = page_lru(page);
>
> -               spin_lock_irq(&pgdat->lru_lock);
> +               get_page(page);
>                 lruvec = mem_cgroup_page_lruvec(page, pgdat);
> -               if (PageLRU(page)) {
> -                       int lru = page_lru(page);
> -                       get_page(page);
> -                       ClearPageLRU(page);
> -                       del_page_from_lru_list(page, lruvec, lru);
> -                       ret = 0;
> -               }
> +               spin_lock_irq(&pgdat->lru_lock);
> +               del_page_from_lru_list(page, lruvec, lru);
>                 spin_unlock_irq(&pgdat->lru_lock);
> +               ret = 0;
>         }
> +
>         return ret;
>  }
>
> --
> 1.8.3.1
>
>
Alex Shi July 17, 2020, 7:45 a.m. UTC | #3
On 2020/7/17 5:12 AM, Alexander Duyck wrote:
> On Fri, Jul 10, 2020 at 5:59 PM Alex Shi <alex.shi@linux.alibaba.com> wrote:
>>
>> Combine PageLRU check and ClearPageLRU into a function by new
>> introduced func TestClearPageLRU. This function will be used as page
>> isolation precondition to prevent other isolations some where else.
>> Then there are may non PageLRU page on lru list, need to remove BUG
>> checking accordingly.
>>
>> Hugh Dickins pointed that __page_cache_release and release_pages
>> has no need to do atomic clear bit since no user on the page at that
>> moment. and no need get_page() before lru bit clear in isolate_lru_page,
>> since it '(1) Must be called with an elevated refcount on the page'.
>>
>> As Andrew Morton mentioned this change would dirty cacheline for page
>> isn't on LRU. But the lost would be acceptable with Rong Chen
>> <rong.a.chen@intel.com> report:
>> https://lkml.org/lkml/2020/3/4/173
>>

...

>> diff --git a/mm/swap.c b/mm/swap.c
>> index f645965fde0e..5092fe9c8c47 100644
>> --- a/mm/swap.c
>> +++ b/mm/swap.c
>> @@ -83,10 +83,9 @@ static void __page_cache_release(struct page *page)
>>                 struct lruvec *lruvec;
>>                 unsigned long flags;
>>
>> +               __ClearPageLRU(page);
>>                 spin_lock_irqsave(&pgdat->lru_lock, flags);
>>                 lruvec = mem_cgroup_page_lruvec(page, pgdat);
>> -               VM_BUG_ON_PAGE(!PageLRU(page), page);
>> -               __ClearPageLRU(page);
>>                 del_page_from_lru_list(page, lruvec, page_off_lru(page));
>>                 spin_unlock_irqrestore(&pgdat->lru_lock, flags);
>>         }
> 
> So this piece doesn't make much sense to me. Why not use
> TestClearPageLRU(page) here? Just a few lines above you are testing
> for PageLRU(page) and it seems like if you are going to go for an
> atomic test/clear and then remove the page from the LRU list you
> should be using it here as well otherwise it seems like you could run
> into a potential collision since you are testing here without clearing
> the bit.
> 

Hi Alex,

Thanks a lot for the comments!

In this function's call path, __page_cache_release, the page cannot have
its lru bit cleared by anyone else, since the page is no longer used by
anyone and is about to be freed. A plain __ClearPageLRU is therefore safe,
and it avoids an unnecessary atomic operation on the page flags.


>> @@ -878,9 +877,8 @@ void release_pages(struct page **pages, int nr)
>>                                 spin_lock_irqsave(&locked_pgdat->lru_lock, flags);
>>                         }
>>
>> -                       lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
>> -                       VM_BUG_ON_PAGE(!PageLRU(page), page);
>>                         __ClearPageLRU(page);
>> +                       lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
>>                         del_page_from_lru_list(page, lruvec, page_off_lru(page));
>>                 }
>>
> 
> Same here. You are just moving the flag clearing, but you didn't
> combine it with the test. It seems like if you are expecting this to
> be treated as an atomic operation. It should be a relatively low cost
> to do since you already should own the cacheline as a result of
> calling put_page_testzero so I am not sure why you are not combining
> the two.

Before the ClearPageLRU there is a put_page_testzero(), which means no one
else is using this page, and isolate_lru_page cannot run on this page
because of its in-function check:
	VM_BUG_ON_PAGE(!page_count(page), page);
So it would be safe here.
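
A rough, abbreviated sketch of that ordering in release_pages() (lock
handling and the other special cases elided):

	if (!put_page_testzero(page))
		continue;	/* someone else still holds a reference */

	if (PageLRU(page)) {
		/*
		 * The refcount is now zero, so isolate_lru_page() and
		 * friends cannot touch this page: a non-atomic clear of
		 * the lru bit is enough here.
		 */
		__ClearPageLRU(page);
		lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
		del_page_from_lru_list(page, lruvec, page_off_lru(page));
	}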


> 
>> diff --git a/mm/vmscan.c b/mm/vmscan.c
>> index c1c4259b4de5..18986fefd49b 100644
>> --- a/mm/vmscan.c
>> +++ b/mm/vmscan.c
>> @@ -1548,16 +1548,16 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
>>  {
>>         int ret = -EINVAL;
>>
>> -       /* Only take pages on the LRU. */
>> -       if (!PageLRU(page))
>> -               return ret;
>> -
>>         /* Compaction should not handle unevictable pages but CMA can do so */
>>         if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
>>                 return ret;
>>
>>         ret = -EBUSY;
>>
>> +       /* Only take pages on the LRU. */
>> +       if (!PageLRU(page))
>> +               return ret;
>> +
>>         /*
>>          * To minimise LRU disruption, the caller can indicate that it only
>>          * wants to isolate pages it will be able to operate on without
>> @@ -1671,8 +1671,6 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
>>                 page = lru_to_page(src);
>>                 prefetchw_prev_lru_page(page, src, flags);
>>
>> -               VM_BUG_ON_PAGE(!PageLRU(page), page);
>> -
>>                 nr_pages = compound_nr(page);
>>                 total_scan += nr_pages;
>>
> 
> So effectively the changes here are making it so that a !PageLRU page
> will cycle to the start of the LRU list. Now if I understand correctly
> we are guaranteed that if the flag is not set it cannot be set while
> we are holding the lru_lock, however it can be cleared while we are
> holding the lock, correct? Thus that is why isolate_lru_pages has to
> call TestClearPageLRU after the earlier check in __isolate_lru_page.

Right. 

> 
> It might make it more readable to pull in the later patch that
> modifies isolate_lru_pages that has it using TestClearPageLRU.
As to this change, it has to be done in this patch, since any TestClearPageLRU
may leave a page on the lru list with its lru bit already cleared, so the
precondition check has to be removed here.

Thanks
Alex
Alexander Duyck July 17, 2020, 6:26 p.m. UTC | #4
On Fri, Jul 17, 2020 at 12:46 AM Alex Shi <alex.shi@linux.alibaba.com> wrote:
>
>
>
> On 2020/7/17 5:12 AM, Alexander Duyck wrote:
> > On Fri, Jul 10, 2020 at 5:59 PM Alex Shi <alex.shi@linux.alibaba.com> wrote:
> >>
> >> Combine PageLRU check and ClearPageLRU into a function by new
> >> introduced func TestClearPageLRU. This function will be used as page
> >> isolation precondition to prevent other isolations some where else.
> >> Then there are may non PageLRU page on lru list, need to remove BUG
> >> checking accordingly.
> >>
> >> Hugh Dickins pointed that __page_cache_release and release_pages
> >> has no need to do atomic clear bit since no user on the page at that
> >> moment. and no need get_page() before lru bit clear in isolate_lru_page,
> >> since it '(1) Must be called with an elevated refcount on the page'.
> >>
> >> As Andrew Morton mentioned this change would dirty cacheline for page
> >> isn't on LRU. But the lost would be acceptable with Rong Chen
> >> <rong.a.chen@intel.com> report:
> >> https://lkml.org/lkml/2020/3/4/173
> >>
>
> ...
>
> >> diff --git a/mm/swap.c b/mm/swap.c
> >> index f645965fde0e..5092fe9c8c47 100644
> >> --- a/mm/swap.c
> >> +++ b/mm/swap.c
> >> @@ -83,10 +83,9 @@ static void __page_cache_release(struct page *page)
> >>                 struct lruvec *lruvec;
> >>                 unsigned long flags;
> >>
> >> +               __ClearPageLRU(page);
> >>                 spin_lock_irqsave(&pgdat->lru_lock, flags);
> >>                 lruvec = mem_cgroup_page_lruvec(page, pgdat);
> >> -               VM_BUG_ON_PAGE(!PageLRU(page), page);
> >> -               __ClearPageLRU(page);
> >>                 del_page_from_lru_list(page, lruvec, page_off_lru(page));
> >>                 spin_unlock_irqrestore(&pgdat->lru_lock, flags);
> >>         }
> >
> > So this piece doesn't make much sense to me. Why not use
> > TestClearPageLRU(page) here? Just a few lines above you are testing
> > for PageLRU(page) and it seems like if you are going to go for an
> > atomic test/clear and then remove the page from the LRU list you
> > should be using it here as well otherwise it seems like you could run
> > into a potential collision since you are testing here without clearing
> > the bit.
> >
>
> Hi Alex,
>
> Thanks a lot for comments!
>
> In this func's call path __page_cache_release, the page is unlikely be
> ClearPageLRU, since this page isn't used by anyone, and going to be freed.
> just __ClearPageLRU would be safe, and could save a non lru page flags disturb.

So if I understand what you are saying correctly, you are indicating
that this page should likely not have the LRU flag set and that we just
transitioned its reference count from 1 -> 0, so there should be nobody
else accessing it, correct?

It might be useful to document this somewhere. Essentially what we are
doing then is breaking this up into the following cases.

1. Setting the LRU bit requires holding the LRU lock
2. Clearing the LRU bit requires either:
        a. Use of atomic operations if the page count is 1 or more
        b. Non-atomic operations if we just dropped the last reference count

Is my understanding of this correct? So we have essentially two
scenarios, one for the get_page_unless_zero case, and another for the
put_page_testzero case.
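
Put as a minimal sketch (a paraphrase of the two clearing cases, not a
quote of the actual kernel code):

	/* case 2a: the page may still be referenced -> atomic test-and-clear */
	if (TestClearPageLRU(page)) {
		/* we won the race and may take the page off its lru list */
	}

	/* case 2b: we just dropped the last reference -> plain clear is enough */
	if (put_page_testzero(page) && PageLRU(page))
		__ClearPageLRU(page);	/* nobody else can race for the bit */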

> >> @@ -878,9 +877,8 @@ void release_pages(struct page **pages, int nr)
> >>                                 spin_lock_irqsave(&locked_pgdat->lru_lock, flags);
> >>                         }
> >>
> >> -                       lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
> >> -                       VM_BUG_ON_PAGE(!PageLRU(page), page);
> >>                         __ClearPageLRU(page);
> >> +                       lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
> >>                         del_page_from_lru_list(page, lruvec, page_off_lru(page));
> >>                 }
> >>
> >
> > Same here. You are just moving the flag clearing, but you didn't
> > combine it with the test. It seems like if you are expecting this to
> > be treated as an atomic operation. It should be a relatively low cost
> > to do since you already should own the cacheline as a result of
> > calling put_page_testzero so I am not sure why you are not combining
> > the two.
>
> before the ClearPageLRU, there is a put_page_testzero(), that means no one using
> this page, and isolate_lru_page can not run on this page the in func checking.
>         VM_BUG_ON_PAGE(!page_count(page), page);
> So it would be safe here.

Okay, so this is another 2b case as defined above then.

> >
> >> diff --git a/mm/vmscan.c b/mm/vmscan.c
> >> index c1c4259b4de5..18986fefd49b 100644
> >> --- a/mm/vmscan.c
> >> +++ b/mm/vmscan.c
> >> @@ -1548,16 +1548,16 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
> >>  {
> >>         int ret = -EINVAL;
> >>
> >> -       /* Only take pages on the LRU. */
> >> -       if (!PageLRU(page))
> >> -               return ret;
> >> -
> >>         /* Compaction should not handle unevictable pages but CMA can do so */
> >>         if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
> >>                 return ret;
> >>
> >>         ret = -EBUSY;
> >>
> >> +       /* Only take pages on the LRU. */
> >> +       if (!PageLRU(page))
> >> +               return ret;
> >> +
> >>         /*
> >>          * To minimise LRU disruption, the caller can indicate that it only
> >>          * wants to isolate pages it will be able to operate on without
> >> @@ -1671,8 +1671,6 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
> >>                 page = lru_to_page(src);
> >>                 prefetchw_prev_lru_page(page, src, flags);
> >>
> >> -               VM_BUG_ON_PAGE(!PageLRU(page), page);
> >> -
> >>                 nr_pages = compound_nr(page);
> >>                 total_scan += nr_pages;
> >>
> >
> > So effectively the changes here are making it so that a !PageLRU page
> > will cycle to the start of the LRU list. Now if I understand correctly
> > we are guaranteed that if the flag is not set it cannot be set while
> > we are holding the lru_lock, however it can be cleared while we are
> > holding the lock, correct? Thus that is why isolate_lru_pages has to
> > call TestClearPageLRU after the earlier check in __isolate_lru_page.
>
> Right.
>
> >
> > It might make it more readable to pull in the later patch that
> > modifies isolate_lru_pages that has it using TestClearPageLRU.
> As to this change, It has to do in this patch, since any TestClearPageLRU may
> cause lru bit miss in the lru list, so the precondication check has to
> removed here.

So I think some of my cognitive dissonance is from the fact that you
really are doing two different things here. You aren't really
implementing the full TestClearPageLRU until patch 15. So this patch
is doing part of 2a and 2b, and then patch 15 is following up and
completing the 2a cases. I still think it might make more sense to
pull out the pieces related to 2b and move them into a patch before
this with documentation explaining that there should be no competition
for the LRU flag because the page has transitioned to a reference
count of zero. Then take the remaining bits and combine them with
patch 15 since the description for the two is pretty similar.
Alex Shi July 19, 2020, 4:45 a.m. UTC | #5
On 2020/7/18 2:26 AM, Alexander Duyck wrote:
> On Fri, Jul 17, 2020 at 12:46 AM Alex Shi <alex.shi@linux.alibaba.com> wrote:
>>
>>
>>
>> On 2020/7/17 5:12 AM, Alexander Duyck wrote:
>>> On Fri, Jul 10, 2020 at 5:59 PM Alex Shi <alex.shi@linux.alibaba.com> wrote:
>>>>
>>>> Combine PageLRU check and ClearPageLRU into a function by new
>>>> introduced func TestClearPageLRU. This function will be used as page
>>>> isolation precondition to prevent other isolations some where else.
>>>> Then there are may non PageLRU page on lru list, need to remove BUG
>>>> checking accordingly.
>>>>
>>>> Hugh Dickins pointed that __page_cache_release and release_pages
>>>> has no need to do atomic clear bit since no user on the page at that
>>>> moment. and no need get_page() before lru bit clear in isolate_lru_page,
>>>> since it '(1) Must be called with an elevated refcount on the page'.
>>>>
>>>> As Andrew Morton mentioned this change would dirty cacheline for page
>>>> isn't on LRU. But the lost would be acceptable with Rong Chen
>>>> <rong.a.chen@intel.com> report:
>>>> https://lkml.org/lkml/2020/3/4/173
>>>>
>>
>> ...
>>
>>>> diff --git a/mm/swap.c b/mm/swap.c
>>>> index f645965fde0e..5092fe9c8c47 100644
>>>> --- a/mm/swap.c
>>>> +++ b/mm/swap.c
>>>> @@ -83,10 +83,9 @@ static void __page_cache_release(struct page *page)
>>>>                 struct lruvec *lruvec;
>>>>                 unsigned long flags;
>>>>
>>>> +               __ClearPageLRU(page);
>>>>                 spin_lock_irqsave(&pgdat->lru_lock, flags);
>>>>                 lruvec = mem_cgroup_page_lruvec(page, pgdat);
>>>> -               VM_BUG_ON_PAGE(!PageLRU(page), page);
>>>> -               __ClearPageLRU(page);
>>>>                 del_page_from_lru_list(page, lruvec, page_off_lru(page));
>>>>                 spin_unlock_irqrestore(&pgdat->lru_lock, flags);
>>>>         }
>>>
>>> So this piece doesn't make much sense to me. Why not use
>>> TestClearPageLRU(page) here? Just a few lines above you are testing
>>> for PageLRU(page) and it seems like if you are going to go for an
>>> atomic test/clear and then remove the page from the LRU list you
>>> should be using it here as well otherwise it seems like you could run
>>> into a potential collision since you are testing here without clearing
>>> the bit.
>>>
>>
>> Hi Alex,
>>
>> Thanks a lot for comments!
>>
>> In this func's call path __page_cache_release, the page is unlikely be
>> ClearPageLRU, since this page isn't used by anyone, and going to be freed.
>> just __ClearPageLRU would be safe, and could save a non lru page flags disturb.
> 
> So if I understand what you are saying correctly you are indicating
> that this page should likely not have the LRU flag set and that we
> just transitioned it from 1 -> 0 so there should be nobody else
> accessing it correct?
> 
> It might be useful to document this somewhere. Essentially what we are
> doing then is breaking this up into the following cases.
> 
> 1. Setting the LRU bit requires holding the LRU lock
> 2. Clearing the LRU bit requires either:
>         a. Use of atomic operations if page count is 1 or more
>         b. Non-atomic operations can be used if we cleared last reference count
> 
> Is my understanding on this correct? So we have essentially two
> scenarios, one for the get_page_unless_zero case, and another with the
> put_page_testzero.

The summary isn't incorrect.
The key points for me are:
1. Generally, the lru bit indicates whether the page is on an lru list; only in
some temporary moment (during isolation) may the page have no lru bit set while
it is still on an lru list. The page must always be on an lru list when the lru
bit is set.
2. The lru bit has to be cleared before the page is deleted from the lru list.
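
The same two rules, annotated on a minimal isolation sketch (mirroring the
isolate_lru_page() hunk in this patch):

	if (TestClearPageLRU(page)) {		/* rule 2: clear the bit first */
		/* rule 1: the page is still on its lru list at this point */
		spin_lock_irq(&pgdat->lru_lock);
		del_page_from_lru_list(page, lruvec, page_lru(page));
		spin_unlock_irq(&pgdat->lru_lock);
	}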

> 
>>>> @@ -878,9 +877,8 @@ void release_pages(struct page **pages, int nr)
>>>>                                 spin_lock_irqsave(&locked_pgdat->lru_lock, flags);
>>>>                         }
>>>>
>>>> -                       lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
>>>> -                       VM_BUG_ON_PAGE(!PageLRU(page), page);
>>>>                         __ClearPageLRU(page);
>>>> +                       lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
>>>>                         del_page_from_lru_list(page, lruvec, page_off_lru(page));
>>>>                 }
>>>>
>>>
>>> Same here. You are just moving the flag clearing, but you didn't
>>> combine it with the test. It seems like if you are expecting this to
>>> be treated as an atomic operation. It should be a relatively low cost
>>> to do since you already should own the cacheline as a result of
>>> calling put_page_testzero so I am not sure why you are not combining
>>> the two.
>>
>> before the ClearPageLRU, there is a put_page_testzero(), that means no one using
>> this page, and isolate_lru_page can not run on this page the in func checking.
>>         VM_BUG_ON_PAGE(!page_count(page), page);
>> So it would be safe here.
> 
> Okay, so this is another 2b case as defined above then.
> 
>>>
>>>> diff --git a/mm/vmscan.c b/mm/vmscan.c
>>>> index c1c4259b4de5..18986fefd49b 100644
>>>> --- a/mm/vmscan.c
>>>> +++ b/mm/vmscan.c
>>>> @@ -1548,16 +1548,16 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
>>>>  {
>>>>         int ret = -EINVAL;
>>>>
>>>> -       /* Only take pages on the LRU. */
>>>> -       if (!PageLRU(page))
>>>> -               return ret;
>>>> -
>>>>         /* Compaction should not handle unevictable pages but CMA can do so */
>>>>         if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
>>>>                 return ret;
>>>>
>>>>         ret = -EBUSY;
>>>>
>>>> +       /* Only take pages on the LRU. */
>>>> +       if (!PageLRU(page))
>>>> +               return ret;
>>>> +
>>>>         /*
>>>>          * To minimise LRU disruption, the caller can indicate that it only
>>>>          * wants to isolate pages it will be able to operate on without
>>>> @@ -1671,8 +1671,6 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
>>>>                 page = lru_to_page(src);
>>>>                 prefetchw_prev_lru_page(page, src, flags);
>>>>
>>>> -               VM_BUG_ON_PAGE(!PageLRU(page), page);
>>>> -
>>>>                 nr_pages = compound_nr(page);
>>>>                 total_scan += nr_pages;
>>>>
>>>
>>> So effectively the changes here are making it so that a !PageLRU page
>>> will cycle to the start of the LRU list. Now if I understand correctly
>>> we are guaranteed that if the flag is not set it cannot be set while
>>> we are holding the lru_lock, however it can be cleared while we are
>>> holding the lock, correct? Thus that is why isolate_lru_pages has to
>>> call TestClearPageLRU after the earlier check in __isolate_lru_page.
>>
>> Right.
>>
>>>
>>> It might make it more readable to pull in the later patch that
>>> modifies isolate_lru_pages that has it using TestClearPageLRU.
>> As to this change, It has to do in this patch, since any TestClearPageLRU may
>> cause lru bit miss in the lru list, so the precondication check has to
>> removed here.
> 
> So I think some of my cognitive dissonance is from the fact that you
> really are doing two different things here. You aren't really
> implementing the full TestClearPageLRU until patch 15. So this patch
> is doing part of 2a and 2b, and then patch 15 is following up and
> completing the 2a cases. I still think it might make more sense to
> pull out the pieces related to 2b and move them into a patch before
> this with documentation explaining that there should be no competition
> for the LRU flag because the page has transitioned to a reference
> count of zero. Then take the remaining bits and combine them with
> patch 15 since the description for the two is pretty similar.
> 

Good suggestion, I will consider it.

Thanks
Alex Shi July 19, 2020, 11:24 a.m. UTC | #6
On 2020/7/19 12:45 PM, Alex Shi wrote:
>>>
>>>> It might make it more readable to pull in the later patch that
>>>> modifies isolate_lru_pages that has it using TestClearPageLRU.
>>> As to this change, It has to do in this patch, since any TestClearPageLRU may
>>> cause lru bit miss in the lru list, so the precondication check has to
>>> removed here.
>> So I think some of my cognitive dissonance is from the fact that you
>> really are doing two different things here. You aren't really
>> implementing the full TestClearPageLRU until patch 15. So this patch
>> is doing part of 2a and 2b, and then patch 15 is following up and
>> completing the 2a cases. I still think it might make more sense to
>> pull out the pieces related to 2b and move them into a patch before
>> this with documentation explaining that there should be no competition
>> for the LRU flag because the page has transitioned to a reference
>> count of zero. Then take the remaining bits and combine them with
>> patch 15 since the description for the two is pretty similar.
>>


As to the patch split suggestion, Hugh and I actually talked about it a few
weeks ago when he gave me these changes. At that time we both thought that
keeping these changes in this patch looked better.
If it is confusing, would a revised commit log like the one below make it better?

Thanks
Alex

    mm/lru: introduce TestClearPageLRU

    Currently lru_lock still guards both the lru list and the page's lru
    bit, which is fine. But if we want to use a per-memcg lruvec lock for
    the page, we need to pin down the page's lruvec/memcg while taking
    that lock. Just taking the lruvec lock first can be undermined by the
    page's memcg charge/migration. To fix this problem, we clear the
    page's lru bit up front and use that as the pinning action that
    blocks memcg changes. That is the reason for the new atomic function
    TestClearPageLRU. So isolating a page now requires both actions:
    TestClearPageLRU and holding the lru_lock.

    This patch combines the PageLRU check and ClearPageLRU into the
    macro-generated function TestClearPageLRU. This function will be used
    as a page isolation precondition to prevent concurrent isolations
    elsewhere. As a consequence, non-PageLRU pages may appear on an lru
    list, so the corresponding BUG checks need to be removed.

    There are 2 rules for the lru bit:
    1. The lru bit still indicates whether a page is on an lru list; only
    in some temporary moment (during isolation) may a page be on an lru
    list without the lru bit set. The page must always be on an lru list
    when the lru bit is set.
    2. The lru bit has to be cleared before the page is deleted from the
    lru list.

    Hugh Dickins pointed out that when a page is on its freeing path and
    nobody else can take it, a non-atomic lru bit clear is preferable, as
    in __page_cache_release and release_pages.
    And there is no need for get_page() before clearing the lru bit in
    isolate_lru_page, since it '(1) Must be called with an elevated
    refcount on the page'.

    As Andrew Morton mentioned, this change dirties the cacheline even
    for a page that isn't on the LRU. But the cost is acceptable
    according to Rong Chen's <rong.a.chen@intel.com> report:
    https://lkml.org/lkml/2020/3/4/173

    Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
    Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
    Cc: Hugh Dickins <hughd@google.com>
    Cc: Johannes Weiner <hannes@cmpxchg.org>
    Cc: Michal Hocko <mhocko@kernel.org>
    Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
    Cc: Andrew Morton <akpm@linux-foundation.org>
    Cc: linux-kernel@vger.kernel.org
    Cc: cgroups@vger.kernel.org
    Cc: linux-mm@kvack.org

Patch

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 6be1aa559b1e..9554ed1387dc 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -326,6 +326,7 @@  static inline void page_init_poison(struct page *page, size_t size)
 PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
 	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
 PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
+	TESTCLEARFLAG(LRU, lru, PF_HEAD)
 PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
 	TESTCLEARFLAG(Active, active, PF_HEAD)
 PAGEFLAG(Workingset, workingset, PF_HEAD)
diff --git a/mm/mlock.c b/mm/mlock.c
index f8736136fad7..228ba5a8e0a5 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -108,13 +108,12 @@  void mlock_vma_page(struct page *page)
  */
 static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
 {
-	if (PageLRU(page)) {
+	if (TestClearPageLRU(page)) {
 		struct lruvec *lruvec;
 
 		lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
 		if (getpage)
 			get_page(page);
-		ClearPageLRU(page);
 		del_page_from_lru_list(page, lruvec, page_lru(page));
 		return true;
 	}
diff --git a/mm/swap.c b/mm/swap.c
index f645965fde0e..5092fe9c8c47 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -83,10 +83,9 @@  static void __page_cache_release(struct page *page)
 		struct lruvec *lruvec;
 		unsigned long flags;
 
+		__ClearPageLRU(page);
 		spin_lock_irqsave(&pgdat->lru_lock, flags);
 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
-		VM_BUG_ON_PAGE(!PageLRU(page), page);
-		__ClearPageLRU(page);
 		del_page_from_lru_list(page, lruvec, page_off_lru(page));
 		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
 	}
@@ -878,9 +877,8 @@  void release_pages(struct page **pages, int nr)
 				spin_lock_irqsave(&locked_pgdat->lru_lock, flags);
 			}
 
-			lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
-			VM_BUG_ON_PAGE(!PageLRU(page), page);
 			__ClearPageLRU(page);
+			lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
 			del_page_from_lru_list(page, lruvec, page_off_lru(page));
 		}
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c1c4259b4de5..18986fefd49b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1548,16 +1548,16 @@  int __isolate_lru_page(struct page *page, isolate_mode_t mode)
 {
 	int ret = -EINVAL;
 
-	/* Only take pages on the LRU. */
-	if (!PageLRU(page))
-		return ret;
-
 	/* Compaction should not handle unevictable pages but CMA can do so */
 	if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
 		return ret;
 
 	ret = -EBUSY;
 
+	/* Only take pages on the LRU. */
+	if (!PageLRU(page))
+		return ret;
+
 	/*
 	 * To minimise LRU disruption, the caller can indicate that it only
 	 * wants to isolate pages it will be able to operate on without
@@ -1671,8 +1671,6 @@  static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 		page = lru_to_page(src);
 		prefetchw_prev_lru_page(page, src, flags);
 
-		VM_BUG_ON_PAGE(!PageLRU(page), page);
-
 		nr_pages = compound_nr(page);
 		total_scan += nr_pages;
 
@@ -1769,21 +1767,19 @@  int isolate_lru_page(struct page *page)
 	VM_BUG_ON_PAGE(!page_count(page), page);
 	WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
 
-	if (PageLRU(page)) {
+	if (TestClearPageLRU(page)) {
 		pg_data_t *pgdat = page_pgdat(page);
 		struct lruvec *lruvec;
+		int lru = page_lru(page);
 
-		spin_lock_irq(&pgdat->lru_lock);
+		get_page(page);
 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
-		if (PageLRU(page)) {
-			int lru = page_lru(page);
-			get_page(page);
-			ClearPageLRU(page);
-			del_page_from_lru_list(page, lruvec, lru);
-			ret = 0;
-		}
+		spin_lock_irq(&pgdat->lru_lock);
+		del_page_from_lru_list(page, lruvec, lru);
 		spin_unlock_irq(&pgdat->lru_lock);
+		ret = 0;
 	}
+
 	return ret;
 }