@@ -133,7 +133,7 @@ void __init init_pointer_table(void *table, int type)
/* unreserve the page so it's possible to free that page */
__ClearPageReserved(PD_PAGE(dp));
- init_page_count(PD_PAGE(dp));
+ page_ref_init(PD_PAGE(dp));
return;
}
@@ -2451,7 +2451,7 @@ extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
static inline void free_reserved_page(struct page *page)
{
ClearPageReserved(page);
- init_page_count(page);
+ page_ref_init(page);
__free_page(page);
adjust_managed_page_count(page, 1);
}
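For orientation, a minimal sketch of the kind of caller free_reserved_page() serves: handing a range of boot-reserved pages to the buddy allocator for the first time. The helper below is hypothetical and not part of this patch; it only illustrates why the refcount must be re-initialised here rather than assumed to hold a meaningful value.

#include <linux/mm.h>
#include <linux/pfn.h>

/* Hypothetical example: release a boot-reserved physical range. */
static void example_unreserve_range(phys_addr_t start, phys_addr_t end)
{
	unsigned long pfn;

	/*
	 * Each page in the range still has PG_reserved set and an undefined
	 * _refcount; free_reserved_page() clears the flag, re-initialises the
	 * refcount via page_ref_init() and hands the page to the allocator.
	 */
	for (pfn = PHYS_PFN(start); pfn < PHYS_PFN(end); pfn++)
		free_reserved_page(pfn_to_page(pfn));
}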
@@ -107,10 +107,14 @@ static inline void folio_set_count(struct folio *folio, int v)
}
/*
- * Setup the page count before being freed into the page allocator for
- * the first time (boot or memory hotplug)
+ * Set up the page refcount to one before the page is freed into the page
+ * allocator for the first time. The memory might not be initialized, so no
+ * assumptions can be made about the current value of page->_refcount. This
+ * call should be made during boot when memory is being initialized, during
+ * memory hotplug when new memory is added, or when previously reserved
+ * memory is unreserved, i.e. the first time the kernel takes control of it.
*/
-static inline void init_page_count(struct page *page)
+static inline void page_ref_init(struct page *page)
{
set_page_count(page, 1);
}
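As a hedged illustration of the comment above (the helpers below are hypothetical and not part of this patch): page_ref_init() unconditionally writes 1 through set_page_count() and makes no assumption about the previous contents of page->_refcount, whereas page_ref_inc() atomically modifies a counter that must already be valid. Only the former is safe on first-touch memory (boot, memory hotplug, unreserving).

#include <linux/page_ref.h>

/* Hypothetical helper: first time the kernel takes control of the page. */
static inline void example_first_touch(struct page *page)
{
	/* _refcount is set to 1; whatever it held before is ignored. */
	page_ref_init(page);
}

/* Hypothetical helper: taking an extra reference later on. */
static inline void example_take_extra_ref(struct page *page)
{
	/* Requires an already-initialised, non-zero _refcount. */
	page_ref_inc(page);
}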
@@ -1569,7 +1569,7 @@ static void __meminit __init_single_page(struct page *page, unsigned long pfn,
{
mm_zero_struct_page(page);
set_page_links(page, zone, nid, pfn);
- init_page_count(page);
+ page_ref_init(page);
page_mapcount_reset(page);
page_cpupid_reset_last(page);
page_kasan_tag_reset(page);