@@ -26,6 +26,8 @@ typedef struct { unsigned long pd; } hugepd_t;
#define __hugepd(x) ((hugepd_t) { (x) })
#endif
+void free_huge_page(struct folio *folio);
+
#ifdef CONFIG_HUGETLB_PAGE
#include <linux/mempolicy.h>
@@ -165,7 +167,6 @@ int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
bool *migratable_cleared);
void folio_putback_active_hugetlb(struct folio *folio);
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
-void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
@@ -1278,13 +1278,9 @@ typedef void compound_page_dtor(struct page *);
enum compound_dtor_id {
NULL_COMPOUND_DTOR,
COMPOUND_PAGE_DTOR,
-#ifdef CONFIG_HUGETLB_PAGE
HUGETLB_PAGE_DTOR,
-#endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
TRANSHUGE_PAGE_DTOR,
-#endif
- NR_COMPOUND_DTORS,
+ NR_COMPOUND_DTORS
};
static inline void folio_set_compound_dtor(struct folio *folio,
@@ -1875,13 +1875,12 @@ struct hstate *size_to_hstate(unsigned long size)
return NULL;
}
-void free_huge_page(struct page *page)
+void free_huge_page(struct folio *folio)
{
/*
* Can't pass hstate in here because it is called from the
-	 * compound page destructor.
+	 * generic mm code.
*/
- struct folio *folio = page_folio(page);
struct hstate *h = folio_hstate(folio);
int nid = folio_nid(folio);
struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
@@ -1936,7 +1935,7 @@ void free_huge_page(struct page *page)
spin_unlock_irqrestore(&hugetlb_lock, flags);
update_and_free_hugetlb_folio(h, folio, true);
} else {
- arch_clear_hugepage_flags(page);
+ arch_clear_hugepage_flags(&folio->page);
enqueue_hugetlb_folio(h, folio);
spin_unlock_irqrestore(&hugetlb_lock, flags);
}
@@ -2246,7 +2245,7 @@ static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
folio = alloc_fresh_hugetlb_folio(h, gfp_mask, node,
nodes_allowed, node_alloc_noretry);
if (folio) {
- free_huge_page(&folio->page); /* free it into the hugepage allocator */
+ free_huge_page(folio); /* free it into the hugepage allocator */
return 1;
}
}
@@ -2435,7 +2434,7 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
folio_set_hugetlb_temporary(folio);
spin_unlock_irq(&hugetlb_lock);
- free_huge_page(&folio->page);
+ free_huge_page(folio);
return NULL;
}
@@ -2547,8 +2546,7 @@ static int gather_surplus_pages(struct hstate *h, long delta)
__must_hold(&hugetlb_lock)
{
LIST_HEAD(surplus_list);
- struct folio *folio;
- struct page *page, *tmp;
+ struct folio *folio, *tmp;
int ret;
long i;
long needed, allocated;
@@ -2608,11 +2606,11 @@ static int gather_surplus_pages(struct hstate *h, long delta)
ret = 0;
/* Free the needed pages to the hugetlb pool */
- list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+ list_for_each_entry_safe(folio, tmp, &surplus_list, lru) {
if ((--needed) < 0)
break;
/* Add the page to the hugetlb allocator */
- enqueue_hugetlb_folio(h, page_folio(page));
+ enqueue_hugetlb_folio(h, folio);
}
free:
spin_unlock_irq(&hugetlb_lock);
@@ -2621,8 +2619,8 @@ static int gather_surplus_pages(struct hstate *h, long delta)
* Free unnecessary surplus pages to the buddy allocator.
* Pages have no ref count, call free_huge_page directly.
*/
- list_for_each_entry_safe(page, tmp, &surplus_list, lru)
- free_huge_page(page);
+ list_for_each_entry_safe(folio, tmp, &surplus_list, lru)
+ free_huge_page(folio);
spin_lock_irq(&hugetlb_lock);
return ret;
@@ -3232,7 +3230,7 @@ static void __init gather_bootmem_prealloc(void)
if (prep_compound_gigantic_folio(folio, huge_page_order(h))) {
WARN_ON(folio_test_reserved(folio));
prep_new_hugetlb_folio(h, folio, folio_nid(folio));
- free_huge_page(page); /* add to the hugepage allocator */
+ free_huge_page(folio); /* add to the hugepage allocator */
} else {
/* VERY unlikely inflated ref count on a tail page */
free_gigantic_folio(folio, huge_page_order(h));
@@ -3264,7 +3262,7 @@ static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
&node_states[N_MEMORY], NULL);
if (!folio)
break;
- free_huge_page(&folio->page); /* free it into the hugepage allocator */
+ free_huge_page(folio); /* free it into the hugepage allocator */
}
cond_resched();
}
@@ -3658,7 +3656,7 @@ static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
prep_compound_page(subpage, target_hstate->order);
folio_change_private(inner_folio, NULL);
prep_new_hugetlb_folio(target_hstate, inner_folio, nid);
- free_huge_page(subpage);
+ free_huge_page(inner_folio);
}
mutex_unlock(&target_hstate->resize_lock);
@@ -287,9 +287,6 @@ const char * const migratetype_names[MIGRATE_TYPES] = {
static compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
[NULL_COMPOUND_DTOR] = NULL,
[COMPOUND_PAGE_DTOR] = free_compound_page,
-#ifdef CONFIG_HUGETLB_PAGE
- [HUGETLB_PAGE_DTOR] = free_huge_page,
-#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
[TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
#endif
@@ -622,6 +619,11 @@ void destroy_large_folio(struct folio *folio)
{
enum compound_dtor_id dtor = folio->_folio_dtor;
+ if (folio_test_hugetlb(folio)) {
+ free_huge_page(folio);
+ return;
+ }
+
VM_BUG_ON_FOLIO(dtor >= NR_COMPOUND_DTORS, folio);
compound_page_dtors[dtor](&folio->page);
}
Indirect calls are expensive, thanks to Spectre.  Instead of dispatching
hugetlb folios through compound_page_dtors[], have destroy_large_folio()
call free_huge_page() directly, and pass a folio instead of the head page
to save a few more instructions.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/hugetlb.h |  3 ++-
 include/linux/mm.h      |  6 +-----
 mm/hugetlb.c            | 28 +++++++++++++---------------
 mm/page_alloc.c         |  8 +++++---
 4 files changed, 21 insertions(+), 24 deletions(-)
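
For reviewers who want to see the dispatch change in isolation, here is a
minimal userspace sketch of the before/after call shape.  The types and
names (folio_sketch and friends) are invented for the sketch, not the
kernel's; the point is only that the table lookup is an indirect call
(hit by retpoline-style mitigations), while the hugetlb test becomes a
plain direct call.

/* Minimal sketch only -- invented, userspace-compilable types and names. */
#include <stdbool.h>
#include <stdio.h>

struct folio_sketch {
	bool is_hugetlb;	/* stand-in for folio_test_hugetlb() */
	int dtor;		/* stand-in for folio->_folio_dtor */
};

static void free_compound_sketch(struct folio_sketch *f) { (void)f; puts("compound"); }
static void free_hugetlb_sketch(struct folio_sketch *f) { (void)f; puts("hugetlb"); }

/* Before: dispatch through a function-pointer table; with retpolines
 * enabled, the indirect call goes through a thunk. */
static void (* const dtors_sketch[])(struct folio_sketch *) = {
	free_compound_sketch,
	free_hugetlb_sketch,
};

static void destroy_before(struct folio_sketch *f)
{
	dtors_sketch[f->dtor](f);	/* indirect call */
}

/* After: test the folio type and call its destructor directly. */
static void destroy_after(struct folio_sketch *f)
{
	if (f->is_hugetlb) {
		free_hugetlb_sketch(f);	/* direct call */
		return;
	}
	free_compound_sketch(f);
}

int main(void)
{
	struct folio_sketch f = { .is_hugetlb = true, .dtor = 1 };

	destroy_before(&f);	/* prints "hugetlb" via the table */
	destroy_after(&f);	/* prints "hugetlb" via a direct call */
	return 0;
}

The kernel change has the same shape: destroy_large_folio() checks
folio_test_hugetlb() and calls free_huge_page(folio) before falling back
to the compound_page_dtors[] table for the remaining destructors.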