@@ -405,20 +405,23 @@ struct folio {
unsigned long _flags_2;
unsigned long _head_2;
/* public: */
- void *_hugetlb_subpool;
- void *_hugetlb_cgroup;
- void *_hugetlb_cgroup_rsvd;
- void *_hugetlb_hwpoison;
+ struct list_head _deferred_list;
/* private: the union with struct page is transitional */
};
+ struct page __page_2;
+ };
+ union {
struct {
- unsigned long _flags_2a;
- unsigned long _head_2a;
+ unsigned long _flags_3;
+ unsigned long _head_3;
/* public: */
- struct list_head _deferred_list;
+ void *_hugetlb_subpool;
+ void *_hugetlb_cgroup;
+ void *_hugetlb_cgroup_rsvd;
+ void *_hugetlb_hwpoison;
/* private: the union with struct page is transitional */
};
- struct page __page_2;
+ struct page __page_3;
};
};
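
For readability, this is the shape of the two tail-page overlays that the hunk above produces (field names exactly as in the diff; the page[0]/page[1] parts of struct folio and the kernel-doc public/private markers are omitted):

    union {
        struct {
            unsigned long _flags_2;
            unsigned long _head_2;
            struct list_head _deferred_list;
        };
        struct page __page_2;
    };
    union {
        struct {
            unsigned long _flags_3;
            unsigned long _head_3;
            void *_hugetlb_subpool;
            void *_hugetlb_cgroup;
            void *_hugetlb_cgroup_rsvd;
            void *_hugetlb_hwpoison;
        };
        struct page __page_3;
    };

The hugetlb pointers no longer share space with _deferred_list: each overlay now maps onto its own tail page.
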
@@ -455,8 +458,12 @@ FOLIO_MATCH(_refcount, _refcount_1);
offsetof(struct page, pg) + 2 * sizeof(struct page))
FOLIO_MATCH(flags, _flags_2);
FOLIO_MATCH(compound_head, _head_2);
-FOLIO_MATCH(flags, _flags_2a);
-FOLIO_MATCH(compound_head, _head_2a);
+#undef FOLIO_MATCH
+#define FOLIO_MATCH(pg, fl) \
+ static_assert(offsetof(struct folio, fl) == \
+ offsetof(struct page, pg) + 3 * sizeof(struct page))
+FOLIO_MATCH(flags, _flags_3);
+FOLIO_MATCH(compound_head, _head_3);
#undef FOLIO_MATCH
/**
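
The FOLIO_MATCH changes above follow the pattern already used for page[1] and page[2]: the macro is redefined per tail page so that every overlay field is pinned, at compile time, to the offset of the corresponding struct page field within that tail page. A minimal self-contained sketch of the technique, with made-up stand-in types (struct pg / struct fol are not the kernel definitions):

    #include <assert.h>
    #include <stddef.h>

    /* Stand-in for struct page: only the fields we care about here. */
    struct pg {
        unsigned long flags;
        unsigned long compound_head;
    };

    /* Stand-in for struct folio: three whole pages, then a page[3] overlay. */
    struct fol {
        struct pg page0;
        struct pg page1;
        struct pg page2;
        unsigned long _flags_3;   /* must overlay page[3].flags */
        unsigned long _head_3;    /* must overlay page[3].compound_head */
    };

    /* The FOLIO_MATCH idea: verify the overlay offsets at compile time. */
    #define FOLIO_MATCH(pg_field, fol_field) \
        static_assert(offsetof(struct fol, fol_field) == \
                      offsetof(struct pg, pg_field) + 3 * sizeof(struct pg), \
                      "folio overlay out of sync with struct page")

    FOLIO_MATCH(flags, _flags_3);
    FOLIO_MATCH(compound_head, _head_3);
    #undef FOLIO_MATCH

Compiling this with a C11 compiler is all it takes: if the overlay ever drifted out of sync with the page layout, the build would fail.
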
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1646,7 +1646,6 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
folio_ref_unfreeze(folio, 1);
- INIT_LIST_HEAD(&folio->_deferred_list);
hugetlb_free_folio(folio);
}
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -971,6 +971,11 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page)
goto out;
}
break;
+ case 3:
+ /* the third tail page: hugetlb specifics overlap ->mappings */
+ if (IS_ENABLED(CONFIG_HUGETLB_PAGE))
+ break;
+ fallthrough;
default:
if (page->mapping != TAIL_MAPPING) {
bad_page(page, "corrupted mapping in tail page");
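
As for why the new case may skip the TAIL_MAPPING check: with the layout from the first hunk, the hugetlb pointers of the page[3] overlay occupy the words where page[3]->mapping and its neighbours normally live, so a hugetlb folio legitimately carries something other than TAIL_MAPPING there. My reading of the offsets (illustrative only, not an assertion the patch adds):

    /*
     * Illustrative, not part of the patch: on the usual struct page layout it
     * is _hugetlb_cgroup that lands on page[3]->mapping, which is what the
     * "hugetlb specifics overlap ->mappings" comment above refers to.
     */
    static_assert(offsetof(struct folio, _hugetlb_cgroup) ==
                  offsetof(struct page, mapping) + 3 * sizeof(struct page));
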
Let's just move the hugetlb specific stuff to a separate page, and stop
letting it overlay other fields for now.

This frees up some space in page[2], which we will use on 32bit to free up
some space in page[1]. While we could move these things to page[3] instead,
it's cleaner to just move the hugetlb specific things out of the way and
pack the core-folio stuff as tight as possible. ... and we can minimize the
work required in dump_folio.

We can now avoid re-initializing &folio->_deferred_list in hugetlb code.

Hopefully dynamically allocating "struct folio" in the future will further
clean this up.

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 include/linux/mm_types.h | 27 +++++++++++++++++----------
 mm/hugetlb.c             |  1 -
 mm/page_alloc.c          |  5 +++++
 3 files changed, 22 insertions(+), 11 deletions(-)
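
A nice property of keeping the field names in struct folio is that accessors keyed on those names need no change at all. For example, the subpool helpers in include/linux/hugetlb.h (quoted from mainline as I remember them, not part of this patch) keep working as-is regardless of which tail page the field lands in:

    static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
    {
        return folio->_hugetlb_subpool;
    }

    static inline void hugetlb_set_folio_subpool(struct folio *folio,
                                                 struct hugepage_subpool *subpool)
    {
        folio->_hugetlb_subpool = subpool;
    }
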