@@ -169,6 +169,24 @@ static inline struct page *mlock_new(struct page *page)
return (struct page *)((unsigned long)page + NEW_PAGE);
}
+static inline struct page *mlock_dummy(struct page *page)
+{
+ return page;
+}
+
+/* return true if mlock_pvec needs to drain */
+static bool mlock_add_and_need_flush(struct pagevec *pvec, struct folio *folio,
+ struct page *(*mlock)(struct page *page))
+{
+ bool ret = false;
+
+ if (!pagevec_add(pvec, mlock(&folio->page)) ||
+ folio_test_large(folio) || lru_cache_disabled())
+ ret = true;
+
+ return ret;
+}
+

/*
* mlock_pagevec() is derived from pagevec_lru_move_fn():
* perhaps that can make use of such page pointer flags in future,
@@ -233,8 +251,7 @@ void mlock_folio(struct folio *folio)
}

folio_get(folio);
- if (!pagevec_add(pvec, mlock_lru(&folio->page)) ||
- folio_test_large(folio) || lru_cache_disabled())
+ if (mlock_add_and_need_flush(pvec, folio, mlock_lru))
mlock_pagevec(pvec);
put_cpu_var(mlock_pvec);
}
@@ -253,8 +270,7 @@ void mlock_new_page(struct page *page)
__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);

get_page(page);
- if (!pagevec_add(pvec, mlock_new(page)) ||
- PageHead(page) || lru_cache_disabled())
+ if (mlock_add_and_need_flush(pvec, page_folio(page), mlock_new))
mlock_pagevec(pvec);
put_cpu_var(mlock_pvec);
}
@@ -273,8 +289,7 @@ void munlock_page(struct page *page)
*/

get_page(page);
- if (!pagevec_add(pvec, page) ||
- PageHead(page) || lru_cache_disabled())
+ if (mlock_add_and_need_flush(pvec, page_folio(page), mlock_dummy))
mlock_pagevec(pvec);
put_cpu_var(mlock_pvec);
}
Just like pagevec_add_and_need_flush() does, we can abstract a helper
mlock_add_and_need_flush() to decide whether it is necessary to drain
mlock_pvec. No functional change is expected.

Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
CC: Matthew Wilcox (Oracle) <willy@infradead.org>
CC: Hugh Dickins <hughd@google.com>
---
 mm/mlock.c | 27 +++++++++++++++++++++------
 1 file changed, 21 insertions(+), 6 deletions(-)
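
For reference, the existing helper in mm/swap.c that this change mirrors
looks roughly like the sketch below (quoted from memory of the v5.17-era
source; verify against the tree you apply to):

/* return true if pagevec needs to drain */
static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page)
{
        bool ret = false;

        if (!pagevec_add(pvec, page) || PageCompound(page) ||
                        lru_cache_disabled())
                ret = true;

        return ret;
}

mlock_add_and_need_flush() follows the same shape but takes a folio and a
function pointer, so each call site can tag the queued page pointer as it
needs: mlock_lru() and mlock_new() set their respective flag bits, while
mlock_dummy() passes the page through untouched for munlock_page(), which
has always queued the bare page pointer.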