--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -155,7 +155,7 @@ u64 stable_page_flags(const struct page *page)
 	else if (folio_test_large(folio)) {
 		if ((k & (1 << PG_lru)) || is_anon)
 			u |= 1 << KPF_THP;
-		else if (is_huge_zero_page(&folio->page)) {
+		else if (is_huge_zero_folio(folio)) {
 			u |= 1 << KPF_ZERO_PAGE;
 			u |= 1 << KPF_THP;
 		}
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -356,6 +356,11 @@ static inline bool is_huge_zero_page(const struct page *page)
 	return READ_ONCE(huge_zero_page) == page;
 }
 
+static inline bool is_huge_zero_folio(const struct folio *folio)
+{
+	return READ_ONCE(huge_zero_page) == &folio->page;
+}
+
 static inline bool is_huge_zero_pmd(pmd_t pmd)
 {
 	return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
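
The point of the new predicate is easiest to see in isolation. Below is a
minimal, compilable userspace mock (an illustration, not kernel code:
READ_ONCE() and the real folio machinery are elided, and page_folio()
hard-codes the head of the toy's single compound page). It shows the trap
named in the commit message: is_huge_zero_page() quietly returns false when
handed a tail page of the huge zero page, while a caller of
is_huge_zero_folio() has to resolve the page to its folio first, so the
comparison is always against the head.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct page { unsigned long flags; };
struct folio { struct page page; };	/* a folio overlays its head page */

static struct page pages[2];		/* pages[0] = head, pages[1] = tail */
static struct page *huge_zero_page = &pages[0];

/* Stand-in for page_folio(): any subpage resolves to the head page. */
static struct folio *page_folio(struct page *page)
{
	(void)page;			/* only one compound page in this toy */
	return (struct folio *)&pages[0];
}

static bool is_huge_zero_page(const struct page *page)
{
	return huge_zero_page == page;
}

static bool is_huge_zero_folio(const struct folio *folio)
{
	return huge_zero_page == &folio->page;
}

int main(void)
{
	struct page *tail = &pages[1];

	assert(!is_huge_zero_page(tail));		/* confusing false */
	assert(is_huge_zero_folio(page_folio(tail)));	/* cannot go wrong */
	printf("tail page handled correctly via the folio\n");
	return 0;
}
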
@@ -485,6 +490,11 @@ static inline bool is_huge_zero_page(const struct page *page)
 	return false;
 }
 
+static inline bool is_huge_zero_folio(const struct folio *folio)
+{
+	return false;
+}
+
 static inline bool is_huge_zero_pmd(pmd_t pmd)
 {
 	return false;
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -789,12 +789,12 @@ struct deferred_split *get_deferred_split_queue(struct folio *folio)
 }
 #endif
 
-static inline bool is_transparent_hugepage(struct folio *folio)
+static inline bool is_transparent_hugepage(const struct folio *folio)
 {
 	if (!folio_test_large(folio))
 		return false;
 
-	return is_huge_zero_page(&folio->page) ||
+	return is_huge_zero_folio(folio) ||
 	       folio_test_large_rmappable(folio);
 }
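
The const qualifier added to is_transparent_hugepage() above only compiles
because the new predicate also takes a const folio: callers that hold only a
read-only pointer, like the const-taking stable_page_flags() in
fs/proc/page.c, need no cast. A hypothetical read-only consumer (not part of
this patch) sketches the pattern:

/* Hypothetical const-only caller, for illustration: because both
 * predicates accept a const struct folio *, no cast is needed here.
 */
static void report_folio(const struct folio *folio)
{
	if (is_huge_zero_folio(folio))
		pr_info("huge zero folio\n");
}
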
@@ -3085,7 +3085,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 	}
 
-	is_hzp = is_huge_zero_page(&folio->page);
+	is_hzp = is_huge_zero_folio(folio);
 	if (is_hzp) {
 		pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
 		return -EBUSY;
 	}
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -510,7 +510,7 @@ static void queue_folios_pmd(pmd_t *pmd, struct mm_walk *walk)
 		return;
 	}
 	folio = pfn_folio(pmd_pfn(*pmd));
-	if (is_huge_zero_page(&folio->page)) {
+	if (is_huge_zero_folio(folio)) {
 		walk->action = ACTION_CONTINUE;
 		return;
 	}
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -985,7 +985,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
 		struct folio *folio = folios->folios[i];
 		unsigned int nr_refs = refs ? refs[i] : 1;
 
-		if (is_huge_zero_page(&folio->page))
+		if (is_huge_zero_folio(folio))
 			continue;
 
 		if (folio_is_zone_device(folio)) {
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -301,7 +301,7 @@ void free_page_and_swap_cache(struct page *page)
 {
 	struct folio *folio = page_folio(page);
 
 	free_swap_cache(folio);
-	if (!is_huge_zero_page(page))
+	if (!is_huge_zero_folio(folio))
 		folio_put(folio);
 }
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -1699,7 +1699,7 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
 			    !pmd_none(dst_pmdval)) {
 				struct folio *folio = pfn_folio(pmd_pfn(*src_pmd));
 
-				if (!folio || (!is_huge_zero_page(&folio->page) &&
+				if (!folio || (!is_huge_zero_folio(folio) &&
 					       !PageAnonExclusive(&folio->page))) {
 					spin_unlock(ptl);
 					err = -EBUSY;
This is the folio equivalent of is_huge_zero_page(). It doesn't add any
efficiency, but it does prevent the caller from passing a tail page and
getting confused when the predicate returns false.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/proc/page.c          |  2 +-
 include/linux/huge_mm.h | 10 ++++++++++
 mm/huge_memory.c        |  6 +++---
 mm/mempolicy.c          |  2 +-
 mm/swap.c               |  2 +-
 mm/swap_state.c         |  2 +-
 mm/userfaultfd.c        |  2 +-
 7 files changed, 18 insertions(+), 8 deletions(-)
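
One hunk is slightly more than mechanical: free_page_and_swap_cache() used to
pass its raw page argument, so in principle a tail page of the huge zero
folio would have failed the check, whereas resolving through page_folio()
first makes the comparison always against the head. The before/after shape of
that call site, condensed from the diff above (illustrative only):

/* Before: compares the caller's page directly; a tail page of the
 * huge zero folio would compare unequal and fall through.
 */
if (!is_huge_zero_page(page))
	folio_put(folio);

/* After: folio = page_folio(page) has already resolved to the head,
 * so the check cannot be fooled by a tail page.
 */
if (!is_huge_zero_folio(folio))
	folio_put(folio);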