@@ -1211,26 +1211,6 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
nr_nodes--)
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-static void destroy_compound_gigantic_page(struct page *page,
- unsigned int order)
-{
- int i;
- int nr_pages = 1 << order;
- struct page *p = page + 1;
-
- atomic_set(compound_mapcount_ptr(page), 0);
- if (hpage_pincount_available(page))
- atomic_set(compound_pincount_ptr(page), 0);
-
- for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
- clear_compound_head(p);
- set_page_refcounted(p);
- }
-
- set_compound_order(page, 0);
- __ClearPageHead(page);
-}
-
static void free_gigantic_page(struct page *page, unsigned int order)
{
/*
@@ -1288,8 +1268,6 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
return NULL;
}
static inline void free_gigantic_page(struct page *page, unsigned int order) { }
-static inline void destroy_compound_gigantic_page(struct page *page,
- unsigned int order) { }
#endif
static void update_and_free_page(struct hstate *h, struct page *page)
@@ -620,4 +620,6 @@ struct migration_target_control {
gfp_t gfp_mask;
};
+void destroy_compound_gigantic_page(struct page *page,
+ unsigned int order);
#endif /* __MM_INTERNAL_H */
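The declaration sits in mm/internal.h, keeping the helper private to mm/ while letting both hugetlb and the page allocator reach it. A minimal teardown sketch for a caller inside mm/ (demo_teardown() is a hypothetical name; it assumes the final reference to the head page has already been dropped, as in __free_pages_ok() below):

#include <linux/mm.h>
#include "internal.h"	/* destroy_compound_gigantic_page() */

/* Undo prep_compound_page() on a gigantic allocation, then hand the
 * PFN range back; free_contig_range() drops one reference per page. */
static void demo_teardown(struct page *head, unsigned int order)
{
	destroy_compound_gigantic_page(head, order);
	free_contig_range(page_to_pfn(head), 1UL << order);
}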
@@ -2138,7 +2138,13 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
{
struct page *page;
- page = __alloc_pages(gfp, order, nid);
+ if (order >= MAX_ORDER) {
+ page = alloc_contig_pages(1UL << order, gfp, nid, NULL);
+ if (page && (gfp & __GFP_COMP))
+ prep_compound_page(page, order);
+ } else {
+ page = __alloc_pages(gfp, order, nid);
+ }
/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
if (!static_branch_likely(&vm_numa_stat_key))
return page;
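The boundary test is deliberate: MAX_ORDER is an exclusive limit, so the largest block the buddy allocator can hand out is order MAX_ORDER - 1 (4MB with 4KB pages and MAX_ORDER == 11), and any request at or above MAX_ORDER has to take the alloc_contig_pages() route, matching the order >= MAX_ORDER test in the free path below. A compile-time sketch of that boundary, assuming only the stock MAX_ORDER_NR_PAGES definition (max_order_boundary_sketch() is a hypothetical name):

#include <linux/bug.h>
#include <linux/mmzone.h>

/* The buddy allocator's largest block is order MAX_ORDER - 1, which is
 * exactly what MAX_ORDER_NR_PAGES encodes; orders >= MAX_ORDER are out
 * of its reach and must use the contiguous-range allocator. */
static inline void max_order_boundary_sketch(void)
{
	BUILD_BUG_ON(MAX_ORDER_NR_PAGES != (1UL << (MAX_ORDER - 1)));
}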
@@ -2212,6 +2218,14 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
nmask = policy_nodemask(gfp, pol);
if (!nmask || node_isset(hpage_node, *nmask)) {
mpol_cond_put(pol);
+
+ if (order >= MAX_ORDER) {
+ page = alloc_contig_pages(1UL << order, gfp,
+ hpage_node, NULL);
+ if (page && (gfp & __GFP_COMP))
+ prep_compound_page(page, order);
+ goto out;
+ }
/*
* First, try to allocate THP only on local node, but
* don't reclaim unnecessarily, just compact.
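In the VMA path the gigantic case is tried on the preferred huge page node before the usual THP local-node logic runs. A caller-side sketch of a PUD-sized anonymous allocation, assuming 4KB base pages (alloc_anon_gigantic() and this HPAGE_PUD_ORDER definition are hypothetical, not part of the patch):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Assumed for the sketch: order 18 with 4KB pages, i.e. 1GB, which is
 * well above MAX_ORDER, so alloc_pages_vma() takes the new branch. */
#define HPAGE_PUD_ORDER (HPAGE_PUD_SHIFT - PAGE_SHIFT)

static struct page *alloc_anon_gigantic(struct vm_area_struct *vma,
					unsigned long addr)
{
	/* GFP_TRANSHUGE already carries __GFP_COMP, so the new branch
	 * calls prep_compound_page() on success. */
	return alloc_pages_vma(GFP_TRANSHUGE, HPAGE_PUD_ORDER, vma, addr,
			       numa_node_id(), true);
}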
@@ -1480,6 +1480,27 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
}
}
+void destroy_compound_gigantic_page(struct page *page,
+ unsigned int order)
+{
+ int i;
+ int nr_pages = 1 << order;
+ struct page *p = page + 1;
+
+ atomic_set(compound_mapcount_ptr(page), 0);
+ if (hpage_pincount_available(page))
+ atomic_set(compound_pincount_ptr(page), 0);
+ for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
+ clear_compound_head(p);
+ set_page_refcounted(p);
+ }
+
+ set_compound_order(page, 0);
+ __ClearPageHead(page);
+ /* give the head page its reference back for free_contig_range() */
+ set_page_refcounted(page);
+}
+
static void __free_pages_ok(struct page *page, unsigned int order)
{
unsigned long flags;
@@ -1489,11 +1510,16 @@ static void __free_pages_ok(struct page *page, unsigned int order)
if (!free_pages_prepare(page, order, true))
return;
- migratetype = get_pfnblock_migratetype(page, pfn);
- local_irq_save(flags);
- __count_vm_events(PGFREE, 1 << order);
- free_one_page(page_zone(page), page, pfn, order, migratetype);
- local_irq_restore(flags);
+ if (order >= MAX_ORDER) {
+ destroy_compound_gigantic_page(page, order);
+ free_contig_range(page_to_pfn(page), 1 << order);
+ } else {
+ migratetype = get_pfnblock_migratetype(page, pfn);
+ local_irq_save(flags);
+ __count_vm_events(PGFREE, 1 << order);
+ free_one_page(page_zone(page), page, pfn, order, migratetype);
+ local_irq_restore(flags);
+ }
}
void __free_pages_core(struct page *page, unsigned int order)
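Taken together with the allocation hunks, this closes the loop: a gigantic compound page freed with its true order is dismantled by destroy_compound_gigantic_page() and returned through free_contig_range() rather than the buddy free lists. A round-trip sketch (gigantic_round_trip() is hypothetical; order 18 assumes 4KB base pages, i.e. a 1GB range, and prep_compound_page() is taken from mm/internal.h as in the mempolicy hunks):

#include <linux/gfp.h>
#include "internal.h"	/* prep_compound_page() */

static void gigantic_round_trip(int nid)
{
	struct page *page;

	/* Mirror the allocation side of the patch by hand. */
	page = alloc_contig_pages(1UL << 18, GFP_KERNEL | __GFP_COMP,
				  nid, NULL);
	if (!page)
		return;
	prep_compound_page(page, 18);

	/* Dropping the last reference with the matching order sends the
	 * range through the order >= MAX_ORDER branch above. */
	__free_pages(page, 18);
}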