diff --git a/mm/folio-compat.c b/mm/folio-compat.c
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -164,3 +164,8 @@ void putback_lru_page(struct page *page)
 {
 	folio_putback_lru(page_folio(page));
 }
+
+void clear_page_mlock(struct page *page)
+{
+	folio_end_mlock(page_folio(page));
+}
diff --git a/mm/internal.h b/mm/internal.h
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -416,17 +416,8 @@ extern unsigned int munlock_vma_page(struct page *page);
 
 extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
			       unsigned long len);
-
-/*
- * Clear the page's PageMlocked(). This can be useful in a situation where
- * we want to unconditionally remove a page from the pagecache -- e.g.,
- * on truncation or freeing.
- *
- * It is legal to call this function for any page, mlocked or not.
- * If called for a page that is still mapped by mlocked vmas, all we do
- * is revert to lazy LRU behaviour -- semantics are not broken.
- */
-extern void clear_page_mlock(struct page *page);
+void folio_end_mlock(struct folio *folio);
+void clear_page_mlock(struct page *page);
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
 /*
@@ -503,7 +494,7 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
 }
 #else /* !CONFIG_MMU */
 static inline void unmap_mapping_folio(struct folio *folio) { }
-static inline void clear_page_mlock(struct page *page) { }
+static inline void folio_end_mlock(struct folio *folio) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
 {
diff --git a/mm/mlock.c b/mm/mlock.c
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -55,31 +55,37 @@ EXPORT_SYMBOL(can_do_mlock);
  */
 
 /*
- * LRU accounting for clear_page_mlock()
+ * Clear the folio's PageMlocked(). This can be useful in a situation where
+ * we want to unconditionally remove a folio from the pagecache -- e.g.,
+ * on truncation or freeing.
+ *
+ * It is legal to call this function for any folio, mlocked or not.
+ * If called for a folio that is still mapped by mlocked vmas, all we do
+ * is revert to lazy LRU behaviour -- semantics are not broken.
  */
-void clear_page_mlock(struct page *page)
+void folio_end_mlock(struct folio *folio)
 {
-	int nr_pages;
+	long nr_pages;
 
-	if (!TestClearPageMlocked(page))
+	if (!folio_test_clear_mlocked(folio))
 		return;
 
-	nr_pages = thp_nr_pages(page);
-	mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
+	nr_pages = folio_nr_pages(folio);
+	zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
 	count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
 	/*
-	 * The previous TestClearPageMlocked() corresponds to the smp_mb()
+	 * The previous folio_test_clear_mlocked() corresponds to the smp_mb()
	 * in __pagevec_lru_add_fn().
	 *
	 * See __pagevec_lru_add_fn for more explanation.
	 */
-	if (!isolate_lru_page(page)) {
-		putback_lru_page(page);
+	if (!folio_isolate_lru(folio)) {
+		folio_putback_lru(folio);
 	} else {
 		/*
-		 * We lost the race. the page already moved to evictable list.
+		 * We lost the race. The folio already moved to evictable list.
		 */
-		if (PageUnevictable(page))
+		if (folio_test_unevictable(folio))
			count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
 	}
 }
Add a clear_page_mlock() wrapper function. It looks like all callers
were already passing a head page, but if they weren't, this will fix
an accounting bug.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/folio-compat.c |  5 +++++
 mm/internal.h     | 15 +++------------
 mm/mlock.c        | 28 +++++++++++++++++-----------
 3 files changed, 25 insertions(+), 23 deletions(-)
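
A note on the accounting bug mentioned above: the old clear_page_mlock()
sized its NR_MLOCK update with thp_nr_pages(), which only reports the full
size of a compound page when handed the head page (for a tail page it
returns 1, and asserts in debug builds). The wrapper's page_folio() call
resolves any tail page back to its folio, so folio_nr_pages() always counts
the whole thing. Below is a minimal userspace sketch of that difference --
NOT kernel code; the struct and helpers are simplified stand-ins for the
real definitions:

/* Illustrative userspace model only; simplified stand-ins for
 * struct page / struct folio and their helpers. */
#include <assert.h>
#include <stdio.h>

struct page {
	struct page *head;	/* points to self for a head page */
	long nr_pages;		/* valid only on the head page */
};

/* Stand-in for page_folio(): tail pages resolve to their head. */
static struct page *page_folio(struct page *page)
{
	return page->head;
}

/* Old-style sizing: only correct when handed the head page. */
static long thp_nr_pages(struct page *page)
{
	return (page->head == page) ? page->nr_pages : 1;
}

/* New-style sizing: correct for any constituent page. */
static long folio_nr_pages(struct page *page)
{
	return page_folio(page)->nr_pages;
}

int main(void)
{
	struct page compound[512];	/* model a 2MB THP: 512 subpages */

	for (int i = 0; i < 512; i++)
		compound[i].head = &compound[0];
	compound[0].nr_pages = 512;

	/* With the head page both paths agree... */
	assert(thp_nr_pages(&compound[0]) == 512);
	assert(folio_nr_pages(&compound[0]) == 512);

	/* ...but with a tail page the old path undercounts. */
	printf("tail page, old-style: %ld\n", thp_nr_pages(&compound[7]));
	printf("tail page, folio:     %ld\n", folio_nr_pages(&compound[7]));
	return 0;
}

The tail-page case prints 1 via the old-style helper and 512 via the
folio-style one; the former is exactly the NR_MLOCK undercount the
page_folio() wrapper guards against.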