[46/75] mm/mlock: Turn clear_page_mlock() into folio_end_mlock()

Message ID 20220204195852.1751729-47-willy@infradead.org
State New
Series MM folio patches for 5.18

Commit Message

Matthew Wilcox Feb. 4, 2022, 7:58 p.m. UTC
Turn clear_page_mlock() into folio_end_mlock() and add a
clear_page_mlock() compatibility wrapper.  It looks like all
callers were already passing a head page, but if any were not,
this also fixes an accounting bug.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/folio-compat.c |  5 +++++
 mm/internal.h     | 15 +++------------
 mm/mlock.c        | 28 +++++++++++++++++-----------
 3 files changed, 25 insertions(+), 23 deletions(-)
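For illustration, a minimal sketch of the accounting point made in the
commit message above.  example_caller() is hypothetical and not part of
this series; the note that thp_nr_pages() reports a single page for a
tail page reflects my reading of the pre-folio helper (it falls back to
1 for non-head pages) and assumes CONFIG_DEBUG_VM is off:

/* Hypothetical caller, for illustration only -- not from this series. */
static void example_caller(struct page *page)
{
	/*
	 * Safe even if @page is a tail page of a compound page: the
	 * wrapper's page_folio() resolves tail pages to their owning
	 * folio, so folio_end_mlock() subtracts folio_nr_pages() from
	 * NR_MLOCK.  The old clear_page_mlock() used thp_nr_pages(page),
	 * which evaluates to 1 for a tail page, undercounting NR_MLOCK.
	 */
	clear_page_mlock(page);
}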

Patch

diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index 46fa179e32fb..bcb037d9cec3 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -164,3 +164,8 @@  void putback_lru_page(struct page *page)
 {
 	folio_putback_lru(page_folio(page));
 }
+
+void clear_page_mlock(struct page *page)
+{
+	folio_end_mlock(page_folio(page));
+}
diff --git a/mm/internal.h b/mm/internal.h
index 7f1db0f1a8bc..041c76a4c284 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -416,17 +416,8 @@  extern unsigned int munlock_vma_page(struct page *page);
 
 extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
 			      unsigned long len);
-
-/*
- * Clear the page's PageMlocked().  This can be useful in a situation where
- * we want to unconditionally remove a page from the pagecache -- e.g.,
- * on truncation or freeing.
- *
- * It is legal to call this function for any page, mlocked or not.
- * If called for a page that is still mapped by mlocked vmas, all we do
- * is revert to lazy LRU behaviour -- semantics are not broken.
- */
-extern void clear_page_mlock(struct page *page);
+void folio_end_mlock(struct folio *folio);
+void clear_page_mlock(struct page *page);
 
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
@@ -503,7 +494,7 @@  static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
 }
 #else /* !CONFIG_MMU */
 static inline void unmap_mapping_folio(struct folio *folio) { }
-static inline void clear_page_mlock(struct page *page) { }
+static inline void folio_end_mlock(struct folio *folio) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
 {
diff --git a/mm/mlock.c b/mm/mlock.c
index 24d0809cacba..ff067d64acc5 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -55,31 +55,37 @@  EXPORT_SYMBOL(can_do_mlock);
  */
 
 /*
- *  LRU accounting for clear_page_mlock()
+ * Clear the folio's PageMlocked().  This can be useful in a situation where
+ * we want to unconditionally remove a folio from the pagecache -- e.g.,
+ * on truncation or freeing.
+ *
+ * It is legal to call this function for any folio, mlocked or not.
+ * If called for a folio that is still mapped by mlocked vmas, all we do
+ * is revert to lazy LRU behaviour -- semantics are not broken.
  */
-void clear_page_mlock(struct page *page)
+void folio_end_mlock(struct folio *folio)
 {
-	int nr_pages;
+	long nr_pages;
 
-	if (!TestClearPageMlocked(page))
+	if (!folio_test_clear_mlocked(folio))
 		return;
 
-	nr_pages = thp_nr_pages(page);
-	mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
+	nr_pages = folio_nr_pages(folio);
+	zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
 	count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
 	/*
-	 * The previous TestClearPageMlocked() corresponds to the smp_mb()
+	 * The previous folio_test_clear_mlocked() corresponds to the smp_mb()
 	 * in __pagevec_lru_add_fn().
 	 *
 	 * See __pagevec_lru_add_fn for more explanation.
 	 */
-	if (!isolate_lru_page(page)) {
-		putback_lru_page(page);
+	if (!folio_isolate_lru(folio)) {
+		folio_putback_lru(folio);
 	} else {
 		/*
-		 * We lost the race. the page already moved to evictable list.
+		 * We lost the race. The folio already moved to the evictable list.
 		 */
-		if (PageUnevictable(page))
+		if (folio_test_unevictable(folio))
 			count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
 	}
 }
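Not part of the patch, but for completeness, a usage sketch of the new
entry point.  example_truncate_folio() is hypothetical; per the comment
moved above folio_end_mlock() in mm/mlock.c, it is legal to call it for
any folio, mlocked or not:

/* Hypothetical folio-based caller, for illustration only. */
static void example_truncate_folio(struct folio *folio)
{
	/* Clears PG_mlocked and adjusts NR_MLOCK if it was set. */
	folio_end_mlock(folio);
	/* ... go on to remove the folio from the page cache ... */
}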