--- a/mm/internal.h
+++ b/mm/internal.h
@@ -741,7 +741,7 @@ extern bool free_pages_prepare(struct page *page, unsigned int order);
extern int user_min_free_kbytes;
-void free_unref_page(struct page *page, unsigned int order);
+void free_frozen_pages(struct page *page, unsigned int order);
void free_unref_folios(struct folio_batch *fbatch);
extern void zone_pcp_reset(struct zone *zone);
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2626,9 +2626,9 @@ static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
return high;
}
-static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
- struct page *page, int migratetype,
- unsigned int order)
+static void free_frozen_page_commit(struct zone *zone,
+ struct per_cpu_pages *pcp, struct page *page, int migratetype,
+ unsigned int order)
{
int high, batch;
int pindex;
@@ -2677,7 +2677,7 @@ static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
/*
* Free a pcp page
*/
-void free_unref_page(struct page *page, unsigned int order)
+void free_frozen_pages(struct page *page, unsigned int order)
{
unsigned long __maybe_unused UP_flags;
struct per_cpu_pages *pcp;
@@ -2713,7 +2713,7 @@ void free_unref_page(struct page *page, unsigned int order)
pcp_trylock_prepare(UP_flags);
pcp = pcp_spin_trylock(zone->per_cpu_pageset);
if (pcp) {
- free_unref_page_commit(zone, pcp, page, migratetype, order);
+ free_frozen_page_commit(zone, pcp, page, migratetype, order);
pcp_spin_unlock(pcp);
} else {
free_one_page(zone, page, pfn, order, FPI_NONE);
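Apart from the rename, the free path is unchanged: it takes the per-CPU pageset lock with a trylock and, if that fails, bypasses the pcp cache and frees the page straight back to the zone. A minimal userspace sketch of that trylock-with-fallback shape, with hypothetical names standing in for the kernel API:

    #include <pthread.h>
    #include <stdio.h>

    struct pcp_cache { pthread_mutex_t lock; };

    static void cache_free(struct pcp_cache *c, void *obj) { (void)c; printf("fast path: %p\n", obj); }
    static void pool_free(void *obj) { printf("slow path: %p\n", obj); }

    /* Never spin on the per-CPU lock: if it is contended, fall back to
     * the shared pool, mirroring the pcp_spin_trylock()/free_one_page()
     * split in the hunk above. */
    static void free_obj(struct pcp_cache *c, void *obj)
    {
        if (pthread_mutex_trylock(&c->lock) == 0) {
            cache_free(c, obj);
            pthread_mutex_unlock(&c->lock);
        } else {
            pool_free(obj);
        }
    }

    int main(void)
    {
        struct pcp_cache c = { PTHREAD_MUTEX_INITIALIZER };
        int x = 0;

        free_obj(&c, &x);
        return 0;
    }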
@@ -2777,7 +2777,7 @@ void free_unref_folios(struct folio_batch *folios)
/*
* Free isolated pages directly to the
- * allocator, see comment in free_unref_page.
+ * allocator, see comment in free_frozen_pages.
*/
if (is_migrate_isolate(migratetype)) {
free_one_page(zone, &folio->page, pfn,
@@ -2808,7 +2808,7 @@ void free_unref_folios(struct folio_batch *folios)
migratetype = MIGRATE_MOVABLE;
trace_mm_page_free_batched(&folio->page);
- free_unref_page_commit(zone, pcp, &folio->page, migratetype,
+ free_frozen_page_commit(zone, pcp, &folio->page, migratetype,
order);
}
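free_unref_folios() keeps its name here; only its call into the commit helper changes. Its structure is still worth noting: folios are freed in batches so that the pcp lock is taken once per run of same-zone folios rather than once per folio, and isolated pageblocks skip the cache entirely. A hedged sketch of that lock-batching shape, with hypothetical types (the kernel's loop also deals with migratetype and trylock failure):

    #include <stdio.h>

    struct zone { const char *name; };

    static void lock_zone(struct zone *z)   { printf("lock %s\n", z->name); }
    static void unlock_zone(struct zone *z) { printf("unlock %s\n", z->name); }
    static void cache_free(struct zone *z, int item) { printf("  free %d to %s\n", item, z->name); }

    /* Batch shape: keep the current zone's lock across consecutive
     * items from the same zone, re-locking only when the zone changes. */
    static void free_batch(struct zone **zones, int *items, int n)
    {
        struct zone *locked = NULL;

        for (int i = 0; i < n; i++) {
            if (zones[i] != locked) {
                if (locked)
                    unlock_zone(locked);
                lock_zone(zones[i]);
                locked = zones[i];
            }
            cache_free(locked, items[i]);
        }
        if (locked)
            unlock_zone(locked);
    }

    int main(void)
    {
        struct zone a = { "DMA" }, b = { "Normal" };
        struct zone *zs[] = { &a, &a, &b };
        int it[] = { 1, 2, 3 };

        free_batch(zs, it, 3);
        return 0;
    }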
@@ -4871,11 +4871,11 @@ void __free_pages(struct page *page, unsigned int order)
struct alloc_tag *tag = pgalloc_tag_get(page);
if (put_page_testzero(page))
- free_unref_page(page, order);
+ free_frozen_pages(page, order);
else if (!head) {
pgalloc_tag_sub_pages(tag, (1 << order) - 1);
while (order-- > 0)
- free_unref_page(page + (1 << order), order);
+ free_frozen_pages(page + (1 << order), order);
}
}
EXPORT_SYMBOL(__free_pages);
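The else branch covers a non-compound, higher-order page whose first subpage is still held by a speculative reference: every subpage except page 0 can be freed immediately, one power-of-two chunk per loop iteration. A small demo of the index arithmetic, assuming only the loop shown in the hunk above:

    #include <stdio.h>

    /* Walks the same loop as __free_pages()'s else branch for an
     * order-3 (8-page) block: each pass frees the upper half of what
     * remains, so pages 4..7, then 2..3, then 1 are released, and only
     * page 0 stays pinned by the outstanding reference. */
    int main(void)
    {
        unsigned int order = 3;

        while (order-- > 0)
            printf("free_frozen_pages(page + %u, order %u) -> pages %u..%u\n",
                   1u << order, order, 1u << order, (2u << order) - 1);
        return 0;
    }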
--- a/mm/page_frag_cache.c
+++ b/mm/page_frag_cache.c
@@ -86,7 +86,7 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
if (page_ref_sub_and_test(page, count))
- free_unref_page(page, compound_order(page));
+ free_frozen_pages(page, compound_order(page));
}
EXPORT_SYMBOL(__page_frag_cache_drain);
@@ -138,7 +138,7 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
goto refill;
if (unlikely(encoded_page_decode_pfmemalloc(encoded_page))) {
- free_unref_page(page,
+ free_frozen_pages(page,
encoded_page_decode_order(encoded_page));
goto refill;
}
@@ -166,6 +166,6 @@ void page_frag_free(void *addr)
struct page *page = virt_to_head_page(addr);
if (unlikely(put_page_testzero(page)))
- free_unref_page(page, compound_order(page));
+ free_frozen_pages(page, compound_order(page));
}
EXPORT_SYMBOL(page_frag_free);
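In the refill path above, __page_frag_alloc_align() decides whether the cached page may be reused by decoding flags that were packed into the stored page pointer; a page that came from pfmemalloc reserves is freed rather than recycled. A hedged sketch of the pointer-encoding idea (hypothetical bit layout, not the kernel's actual encoded_page format):

    #include <stdbool.h>
    #include <stdio.h>

    /* Aligned pointers have zero low bits, so a flag and a small order
     * value can hide there. Layout here is for illustration only. */
    #define ENC_PFMEMALLOC  0x1ul
    #define ENC_ORDER_SHIFT 1
    #define ENC_ORDER_MASK  0x6ul   /* two bits: orders 0..3 */
    #define ENC_PTR_MASK    (~0x7ul)

    static unsigned long encode_page(void *page, unsigned int order, bool pfmemalloc)
    {
        return (unsigned long)page | ((unsigned long)order << ENC_ORDER_SHIFT) |
               (pfmemalloc ? ENC_PFMEMALLOC : 0);
    }

    static bool decode_pfmemalloc(unsigned long enc) { return enc & ENC_PFMEMALLOC; }
    static unsigned int decode_order(unsigned long enc) { return (enc & ENC_ORDER_MASK) >> ENC_ORDER_SHIFT; }
    static void *decode_page(unsigned long enc) { return (void *)(enc & ENC_PTR_MASK); }

    int main(void)
    {
        _Alignas(4096) static char page[4096];
        unsigned long enc = encode_page(page, 3, true);

        printf("page=%p order=%u pfmemalloc=%d\n",
               decode_page(enc), decode_order(enc), decode_pfmemalloc(enc));
        return 0;
    }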
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -109,7 +109,7 @@ void __folio_put(struct folio *folio)
page_cache_release(folio);
folio_unqueue_deferred_split(folio);
mem_cgroup_uncharge(folio);
- free_unref_page(&folio->page, folio_order(folio));
+ free_frozen_pages(&folio->page, folio_order(folio));
}
EXPORT_SYMBOL(__folio_put);
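Every call site in this patch follows the same contract: a reference is dropped first (put_page_testzero(), page_ref_sub_and_test(), or the folio put path above), and only a page whose refcount has reached zero, i.e. a frozen page, is handed to free_frozen_pages(). A minimal sketch of that test-for-zero primitive in C11 atomics, with hypothetical names:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct obj { atomic_int refcount; };

    /* Atomically drop one reference; true means the caller held the
     * last one, so the object is now "frozen" and safe to free. */
    static bool put_testzero(struct obj *o)
    {
        return atomic_fetch_sub(&o->refcount, 1) == 1;
    }

    int main(void)
    {
        struct obj o = { .refcount = 2 };

        printf("%d\n", put_testzero(&o));   /* 0: another reference remains */
        printf("%d\n", put_testzero(&o));   /* 1: last reference, free now  */
        return 0;
    }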