@@ -437,7 +437,7 @@ static unsigned long required_movablecore __initdata;
 static unsigned long required_movablecore_percent __initdata;
 static unsigned long min_dmb_pfn[MAX_NUMNODES] __initdata;
 static unsigned long max_dmb_pfn[MAX_NUMNODES] __initdata;
-static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
+static unsigned long zone_movable_pfn[MAX_NUMNODES];
 bool mirrored_kernelcore __initdata_memblock;
 
 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
@@ -9460,6 +9460,9 @@ static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
 	unsigned long i, end_pfn = start_pfn + nr_pages;
 	struct page *page;
 
+	if (dmb_intersects(start_pfn, end_pfn))
+		return false;
+
 	for (i = start_pfn; i < end_pfn; i++) {
 		page = pfn_to_online_page(i);
 		if (!page)
@@ -9516,7 +9519,10 @@ struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
 					gfp_zone(gfp_mask), nodemask) {
 		spin_lock_irqsave(&zone->lock, flags);
 
-		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
+		if (zone_idx(zone) == ZONE_MOVABLE && zone_movable_pfn[nid])
+			pfn = ALIGN(zone_movable_pfn[nid], nr_pages);
+		else
+			pfn = ALIGN(zone->zone_start_pfn, nr_pages);
 		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
 			if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
 				/*
Designated Movable Blocks are skipped when attempting to allocate contiguous pages. Doing per-page validation across all spanned pages within a zone can be especially inefficient when Designated Movable Blocks create large overlaps between zones. Use dmb_intersects() within pfn_range_valid_contig() as an early check to signal that the range is not valid.

The zone_movable_pfn array, which represents the start of the non-overlapped portion of ZONE_MOVABLE on each node, is no longer marked __initdata so that it can be used at runtime to skip over any DMB-only portion of the zone.

Signed-off-by: Doug Berger <opendmb@gmail.com>
---
 mm/page_alloc.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)
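
As a rough illustration of why the early check pays off, the stand-alone sketch below models a dmb_intersects()-style interval test against a small table of reserved PFN ranges. The table, the helper name dmb_intersects_sketch(), and the example ranges are hypothetical and only approximate the helper introduced earlier in this series; the point is that a candidate [start, end) range can be rejected in one pass over the block list instead of validating every page it spans.

/*
 * Hypothetical stand-alone sketch (not the kernel implementation): an
 * early interval-overlap test against a small table of reserved PFN
 * ranges rejects a candidate [start, end) range in one pass over the
 * table, instead of paying per-page validation for every spanned page.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pfn_range {
	unsigned long start;	/* first pfn of the block */
	unsigned long end;	/* one past the last pfn of the block */
};

/* Example table standing in for the list of Designated Movable Blocks. */
static const struct pfn_range dmb_table[] = {
	{ 0x40000, 0x48000 },
	{ 0x80000, 0x90000 },
};

/* Return true if [start, end) overlaps any block in the table. */
static bool dmb_intersects_sketch(unsigned long start, unsigned long end)
{
	size_t i;

	for (i = 0; i < sizeof(dmb_table) / sizeof(dmb_table[0]); i++) {
		if (start < dmb_table[i].end && dmb_table[i].start < end)
			return true;
	}
	return false;
}

int main(void)
{
	/* Overlaps the first block, so it would be rejected up front. */
	printf("%d\n", dmb_intersects_sketch(0x3f000, 0x41000));
	/* Disjoint from both blocks; would fall through to per-page checks. */
	printf("%d\n", dmb_intersects_sketch(0x50000, 0x60000));
	return 0;
}

In the same spirit, the alloc_contig_pages() hunk above starts the scan at ALIGN(zone_movable_pfn[nid], nr_pages) when the zone is ZONE_MOVABLE, so the search never enters a DMB-only span at the bottom of the zone in the first place.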