
[47/75] mm/mlock: Turn mlock_vma_page() into mlock_vma_folio()

Message ID: 20220204195852.1751729-48-willy@infradead.org (mailing list archive)
State: New
Series: MM folio patches for 5.18

Commit Message

Matthew Wilcox Feb. 4, 2022, 7:58 p.m. UTC
Add mlock_vma_page() back as a wrapper.  Saves a few calls to
compound_head() and an assertion that the page is not a tail page.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/folio-compat.c |  5 +++++
 mm/internal.h     |  3 ++-
 mm/mlock.c        | 18 +++++++++---------
 3 files changed, 16 insertions(+), 10 deletions(-)
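
For context, the win here is that page_folio() does the compound_head() lookup once, inside the compat wrapper, so callers that already hold a folio skip it entirely. A minimal sketch of the two call paths (example_caller() and its arguments are illustrative, not part of the patch):

	/* Sketch only: example_caller() is a made-up caller, not in the patch. */
	static void example_caller(struct folio *folio, struct page *page)
	{
		/*
		 * Folio-aware code calls the new function directly: no
		 * compound_head() lookup, no tail-page assertion.
		 */
		mlock_vma_folio(folio);

		/*
		 * Not-yet-converted code keeps calling the wrapper, which does
		 * the page -> folio conversion (compound_head()) exactly once.
		 */
		mlock_vma_page(page);
	}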

Comments

Mike Rapoport Feb. 7, 2022, 10:46 a.m. UTC | #1
On Fri, Feb 04, 2022 at 07:58:24PM +0000, Matthew Wilcox (Oracle) wrote:
> Add mlock_vma_page() back as a wrapper.  Saves a few calls to
> compound_head() and an assertion that the page is not a tail page.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  mm/folio-compat.c |  5 +++++
>  mm/internal.h     |  3 ++-
>  mm/mlock.c        | 18 +++++++++---------
>  3 files changed, 16 insertions(+), 10 deletions(-)
> 
> diff --git a/mm/folio-compat.c b/mm/folio-compat.c
> index bcb037d9cec3..9cb0867d5b38 100644
> --- a/mm/folio-compat.c
> +++ b/mm/folio-compat.c
> @@ -169,3 +169,8 @@ void clear_page_mlock(struct page *page)
>  {
>  	folio_end_mlock(page_folio(page));
>  }
> +
> +void mlock_vma_page(struct page *page)
> +{
> +	mlock_vma_folio(page_folio(page));
> +}

This will make no-mmu unhappy, e.g. arm allnoconfig:

  CC      mm/folio-compat.o
mm/folio-compat.c:174:6: error: redefinition of 'mlock_vma_page'
  174 | void mlock_vma_page(struct page *page)
      |      ^~~~~~~~~~~~~~
In file included from mm/folio-compat.c:11:
mm/internal.h:501:20: note: previous definition of 'mlock_vma_page' was here
  501 | static inline void mlock_vma_page(struct page *page) { }
      |                    ^~~~~~~~~~~~~~
mm/folio-compat.c: In function 'mlock_vma_page':
mm/folio-compat.c:176:2: error: implicit declaration of function 'mlock_vma_folio'; did you mean 'mlock_vma_page'? [-Werror=implicit-function-declaration]
  176 |  mlock_vma_folio(page_folio(page));
      |  ^~~~~~~~~~~~~~~
      |  mlock_vma_page

This also applies to munlock_vma_page() and page_mlock(). 
For some reason no build yelled about clear_page_mlock()...

> diff --git a/mm/internal.h b/mm/internal.h
> index 041c76a4c284..18b024aa7e59 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -411,7 +411,8 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
>  /*
>   * must be called with vma's mmap_lock held for read or write, and page locked.
>   */
> -extern void mlock_vma_page(struct page *page);
> +void mlock_vma_page(struct page *page);
> +void mlock_vma_folio(struct folio *folio);
>  extern unsigned int munlock_vma_page(struct page *page);
>  
>  extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
> diff --git a/mm/mlock.c b/mm/mlock.c
> index ff067d64acc5..d998fd5c84bf 100644
> --- a/mm/mlock.c
> +++ b/mm/mlock.c
> @@ -94,21 +94,21 @@ void folio_end_mlock(struct folio *folio)
>   * Mark page as mlocked if not already.
>   * If page on LRU, isolate and putback to move to unevictable list.
>   */
> -void mlock_vma_page(struct page *page)
> +void mlock_vma_folio(struct folio *folio)
>  {
>  	/* Serialize with page migration */
> -	BUG_ON(!PageLocked(page));
> +	BUG_ON(!folio_test_locked(folio));
>  
> -	VM_BUG_ON_PAGE(PageTail(page), page);
> -	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
> +	VM_BUG_ON_FOLIO(folio_test_large(folio) && folio_test_double_map(folio),
> +			folio);
>  
> -	if (!TestSetPageMlocked(page)) {
> -		int nr_pages = thp_nr_pages(page);
> +	if (!folio_test_set_mlocked(folio)) {
> +		long nr_pages = folio_nr_pages(folio);
>  
> -		mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
> +		zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
>  		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
> -		if (!isolate_lru_page(page))
> -			putback_lru_page(page);
> +		if (!folio_isolate_lru(folio))
> +			folio_putback_lru(folio);
>  	}
>  }
>  
> -- 
> 2.34.1
> 
>
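
A minimal sketch of how the NOMMU redefinition reported above could be avoided, assuming the !CONFIG_MMU static inline stubs in mm/internal.h stay as they are (this is an illustration, not a fix posted in this thread):

	/*
	 * mm/folio-compat.c -- sketch: only emit the out-of-line wrapper when
	 * the real CONFIG_MMU implementation (and the mlock_vma_folio()
	 * declaration in mm/internal.h) exists; under !CONFIG_MMU the static
	 * inline stub in mm/internal.h already provides mlock_vma_page().
	 */
	#ifdef CONFIG_MMU
	void mlock_vma_page(struct page *page)
	{
		mlock_vma_folio(page_folio(page));
	}
	#endif

The same guard would be needed for the munlock_vma_page() and page_mlock() wrappers mentioned above.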

Patch

diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index bcb037d9cec3..9cb0867d5b38 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -169,3 +169,8 @@  void clear_page_mlock(struct page *page)
 {
 	folio_end_mlock(page_folio(page));
 }
+
+void mlock_vma_page(struct page *page)
+{
+	mlock_vma_folio(page_folio(page));
+}
diff --git a/mm/internal.h b/mm/internal.h
index 041c76a4c284..18b024aa7e59 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -411,7 +411,8 @@  static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 /*
  * must be called with vma's mmap_lock held for read or write, and page locked.
  */
-extern void mlock_vma_page(struct page *page);
+void mlock_vma_page(struct page *page);
+void mlock_vma_folio(struct folio *folio);
 extern unsigned int munlock_vma_page(struct page *page);
 
 extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
diff --git a/mm/mlock.c b/mm/mlock.c
index ff067d64acc5..d998fd5c84bf 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -94,21 +94,21 @@  void folio_end_mlock(struct folio *folio)
  * Mark page as mlocked if not already.
  * If page on LRU, isolate and putback to move to unevictable list.
  */
-void mlock_vma_page(struct page *page)
+void mlock_vma_folio(struct folio *folio)
 {
 	/* Serialize with page migration */
-	BUG_ON(!PageLocked(page));
+	BUG_ON(!folio_test_locked(folio));
 
-	VM_BUG_ON_PAGE(PageTail(page), page);
-	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
+	VM_BUG_ON_FOLIO(folio_test_large(folio) && folio_test_double_map(folio),
+			folio);
 
-	if (!TestSetPageMlocked(page)) {
-		int nr_pages = thp_nr_pages(page);
+	if (!folio_test_set_mlocked(folio)) {
+		long nr_pages = folio_nr_pages(folio);
 
-		mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
+		zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
 		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
-		if (!isolate_lru_page(page))
-			putback_lru_page(page);
+		if (!folio_isolate_lru(folio))
+			folio_putback_lru(folio);
 	}
 }