@@ -28,6 +28,19 @@ typedef struct { unsigned long pd; } hugepd_t;
#include <linux/shm.h>
#include <asm/tlbflush.h>
+enum {
+ SUBPAGE_INDEX_ACTIVE = 1, /* reuse the PG_private page flag */
+ SUBPAGE_INDEX_TEMPORARY, /* reuse page->mapping */
+#ifdef CONFIG_CGROUP_HUGETLB
+ SUBPAGE_INDEX_CGROUP = SUBPAGE_INDEX_TEMPORARY, /* reuse page->private */
+ SUBPAGE_INDEX_CGROUP_RSVD, /* reuse page->private */
+#endif
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+ SUBPAGE_INDEX_HWPOISON, /* reuse page->private */
+#endif
+ NR_USED_SUBPAGE,
+};
+
struct hugepage_subpool {
spinlock_t lock;
long count;
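
As a quick cross-check of the new names (illustrative only, not part of the patch): with both CONFIG_CGROUP_HUGETLB and CONFIG_HUGETLB_PAGE_FREE_VMEMMAP enabled, the enum resolves to exactly the old magic numbers, and SUBPAGE_INDEX_CGROUP can share index 2 with SUBPAGE_INDEX_TEMPORARY because the former lives in page->private while the latter uses page->mapping. A file-scope sketch using the kernel's static_assert() from <linux/build_bug.h>:

#if defined(CONFIG_CGROUP_HUGETLB) && defined(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP)
static_assert(SUBPAGE_INDEX_ACTIVE == 1);	/* was &page[1] */
static_assert(SUBPAGE_INDEX_TEMPORARY == 2);	/* was page[2].mapping */
static_assert(SUBPAGE_INDEX_CGROUP == 2);	/* was page[2].private */
static_assert(SUBPAGE_INDEX_CGROUP_RSVD == 3);	/* was page[3].private */
static_assert(SUBPAGE_INDEX_HWPOISON == 4);	/* was head + 4 */
static_assert(NR_USED_SUBPAGE == 5);
#endif
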
@@ -24,8 +24,9 @@ struct file_region;
/*
* Minimum page order trackable by hugetlb cgroup.
* At least 4 pages are necessary for all the tracking information.
- * The second tail page (hpage[2]) is the fault usage cgroup.
- * The third tail page (hpage[3]) is the reservation usage cgroup.
+ * The second tail page (hpage[SUBPAGE_INDEX_CGROUP]) is the fault
+ * usage cgroup. The third tail page (hpage[SUBPAGE_INDEX_CGROUP_RSVD])
+ * is the reservation usage cgroup.
*/
#define HUGETLB_CGROUP_MIN_ORDER 2
@@ -66,9 +67,9 @@ __hugetlb_cgroup_from_page(struct page *page, bool rsvd)
if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
return NULL;
if (rsvd)
- return (struct hugetlb_cgroup *)page[3].private;
+ return (void *)page_private(page + SUBPAGE_INDEX_CGROUP_RSVD);
else
- return (struct hugetlb_cgroup *)page[2].private;
+ return (void *)page_private(page + SUBPAGE_INDEX_CGROUP);
}
static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
@@ -90,9 +91,11 @@ static inline int __set_hugetlb_cgroup(struct page *page,
if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
return -1;
if (rsvd)
- page[3].private = (unsigned long)h_cg;
+ set_page_private(page + SUBPAGE_INDEX_CGROUP_RSVD,
+ (unsigned long)h_cg);
else
- page[2].private = (unsigned long)h_cg;
+ set_page_private(page + SUBPAGE_INDEX_CGROUP,
+ (unsigned long)h_cg);
return 0;
}
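
The write and read sides now name the same tail page index for each counter, so whatever __set_hugetlb_cgroup() stores is what __hugetlb_cgroup_from_page() hands back for the same rsvd flag. A minimal round-trip sketch (illustrative only; it assumes a valid huge page and cgroup, and the helper name is hypothetical):

static inline void hugetlb_cgroup_roundtrip_check(struct page *hpage,
						  struct hugetlb_cgroup *h_cg)
{
	/* Store into hpage[SUBPAGE_INDEX_CGROUP_RSVD].private ... */
	__set_hugetlb_cgroup(hpage, h_cg, true);
	/* ... and the reservation-usage read path returns the same pointer. */
	WARN_ON(__hugetlb_cgroup_from_page(hpage, true) != h_cg);
}
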
@@ -1356,6 +1356,7 @@ static inline void __update_and_free_page(struct hstate *h, struct page *page)
schedule_work(&hpage_update_work);
}
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
static inline void hwpoison_subpage_deliver(struct hstate *h, struct page *head)
{
struct page *page;
@@ -1363,7 +1364,7 @@ static inline void hwpoison_subpage_deliver(struct hstate *h, struct page *head)
if (!PageHWPoison(head) || !free_vmemmap_pages_per_hpage(h))
return;
- page = head + page_private(head + 4);
+ page = head + page_private(head + SUBPAGE_INDEX_HWPOISON);
/*
* Move PageHWPoison flag from head page to the raw error page,
@@ -1382,7 +1383,7 @@ static inline void hwpoison_subpage_set(struct hstate *h, struct page *head,
return;
if (free_vmemmap_pages_per_hpage(h)) {
- set_page_private(head + 4, page - head);
+ set_page_private(head + SUBPAGE_INDEX_HWPOISON, page - head);
} else if (page != head) {
/*
* Move PageHWPoison flag from head page to the raw error page,
@@ -1392,6 +1393,24 @@ static inline void hwpoison_subpage_set(struct hstate *h, struct page *head,
ClearPageHWPoison(head);
}
}
+#else
+static inline void hwpoison_subpage_deliver(struct hstate *h, struct page *head)
+{
+}
+
+static inline void hwpoison_subpage_set(struct hstate *h, struct page *head,
+ struct page *page)
+{
+ if (PageHWPoison(head) && page != head) {
+ /*
+ * Move PageHWPoison flag from head page to the raw error page,
+ * which makes any subpages rather than the error page reusable.
+ */
+ SetPageHWPoison(page);
+ ClearPageHWPoison(head);
+ }
+}
+#endif
static void update_and_free_page(struct hstate *h, struct page *page)
{
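
hwpoison_subpage_set() stashes the distance of the raw error page from the head in the tail page at SUBPAGE_INDEX_HWPOISON, and hwpoison_subpage_deliver() adds that offset back to recover the page before moving the PageHWPoison flag. A standalone toy version of the same store-then-recover pattern (plain userspace C, not kernel code; the fake_page struct and array are invented for the demo):

#include <assert.h>

struct fake_page { unsigned long private; };

int main(void)
{
	struct fake_page pages[8];
	struct fake_page *head = pages, *err = &pages[5];

	head[4].private = err - head;		/* hwpoison_subpage_set() analogue */
	assert(head + head[4].private == err);	/* hwpoison_subpage_deliver() analogue */
	return 0;
}
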
@@ -1459,20 +1478,20 @@ struct hstate *size_to_hstate(unsigned long size)
bool page_huge_active(struct page *page)
{
VM_BUG_ON_PAGE(!PageHuge(page), page);
- return PageHead(page) && PagePrivate(&page[1]);
+ return PageHead(page) && PagePrivate(&page[SUBPAGE_INDEX_ACTIVE]);
}
/* never called for tail page */
static void set_page_huge_active(struct page *page)
{
VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
- SetPagePrivate(&page[1]);
+ SetPagePrivate(&page[SUBPAGE_INDEX_ACTIVE]);
}
static void clear_page_huge_active(struct page *page)
{
VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
- ClearPagePrivate(&page[1]);
+ ClearPagePrivate(&page[SUBPAGE_INDEX_ACTIVE]);
}
/*
@@ -1484,17 +1503,17 @@ static inline bool PageHugeTemporary(struct page *page)
if (!PageHuge(page))
return false;
- return (unsigned long)page[2].mapping == -1U;
+ return (unsigned long)page[SUBPAGE_INDEX_TEMPORARY].mapping == -1U;
}
static inline void SetPageHugeTemporary(struct page *page)
{
- page[2].mapping = (void *)-1U;
+ page[SUBPAGE_INDEX_TEMPORARY].mapping = (void *)-1U;
}
static inline void ClearPageHugeTemporary(struct page *page)
{
- page[2].mapping = NULL;
+ page[SUBPAGE_INDEX_TEMPORARY].mapping = NULL;
}
static void __free_huge_page(struct page *page)
@@ -242,6 +242,14 @@ void __init hugetlb_vmemmap_init(struct hstate *h)
unsigned int nr_pages = pages_per_huge_page(h);
unsigned int vmemmap_pages;
+ /*
+ * Only (RESERVE_VMEMMAP_SIZE / sizeof(struct page)) struct page structs
+ * can be used when CONFIG_HUGETLB_PAGE_FREE_VMEMMAP is enabled, so add a
+ * BUILD_BUG_ON to catch invalid usage of the tail struct page.
+ */
+ BUILD_BUG_ON(NR_USED_SUBPAGE >=
+ RESERVE_VMEMMAP_SIZE / sizeof(struct page));
+
if (!hugetlb_free_vmemmap_enabled)
return;
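
For a sense of the headroom behind this check (assuming the series' RESERVE_VMEMMAP_NR of 2, a 4 KiB PAGE_SIZE and a 64-byte struct page, none of which appear in these hunks):

	RESERVE_VMEMMAP_SIZE / sizeof(struct page) = (2 * 4096) / 64 = 128

so NR_USED_SUBPAGE, which is at most 5 with the enum above, sits well below the bound the BUILD_BUG_ON enforces.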