@@ -2333,7 +2333,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
ret = COMPACT_NO_SUITABLE_PAGE;
for (order = cc->order; order < NR_PAGE_ORDERS; order++) {
struct free_area *area = &cc->zone->free_area[order];
- bool can_steal;
+ bool claim_block;
/* Job done if page is free of the right migratetype */
if (!free_area_empty(area, migratetype))
@@ -2350,7 +2350,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
* other migratetype buddy lists.
*/
if (find_suitable_fallback(area, order, migratetype,
- true, &can_steal) != -1)
+ true, &claim_block) != -1)
/*
* Movable pages are OK in any pageblock. If we are
* stealing for a non-movable allocation, make sure
@@ -863,7 +863,7 @@ static inline void init_cma_pageblock(struct page *page)
int find_suitable_fallback(struct free_area *area, unsigned int order,
- int migratetype, bool only_stealable, bool *can_steal);
+ int migratetype, bool only_claim, bool *claim_block);
static inline bool free_area_empty(struct free_area *area, int migratetype)
{
@@ -1942,22 +1942,22 @@ static inline bool boost_watermark(struct zone *zone)
/*
* When we are falling back to another migratetype during allocation, try to
- * steal extra free pages from the same pageblocks to satisfy further
- * allocations, instead of polluting multiple pageblocks.
+ * claim entire blocks to satisfy further allocations, instead of polluting
+ * multiple pageblocks.
*
- * If we are stealing a relatively large buddy page, it is likely there will
- * be more free pages in the pageblock, so try to steal them all. For
- * reclaimable and unmovable allocations, we steal regardless of page size,
- * as fragmentation caused by those allocations polluting movable pageblocks
- * is worse than movable allocations stealing from unmovable and reclaimable
- * pageblocks.
+ * If we are stealing a relatively large buddy page, it is likely there will be
+ * more free pages in the pageblock, so try to claim the whole block. For
+ * reclaimable and unmovable allocations, we try to claim the whole block
+ * regardless of page size, as fragmentation caused by those allocations
+ * polluting movable pageblocks is worse than movable allocations stealing from
+ * unmovable and reclaimable pageblocks.
*/
-static bool can_steal_fallback(unsigned int order, int start_mt)
+static bool should_try_claim_block(unsigned int order, int start_mt)
{
/*
* Leaving this order check is intended, although there is
* relaxed order check in next check. The reason is that
- * we can actually steal whole pageblock if this condition met,
+ * we can actually claim the whole pageblock if this condition is met,
* but, below check doesn't guarantee it and that is just heuristic
* so could be changed anytime.
*/
@@ -1970,7 +1970,7 @@ static bool can_steal_fallback(unsigned int order, int start_mt)
* reclaimable pages that are closest to the request size. After a
* while, memory compaction may occur to form large contiguous pages,
* and the next movable allocation may not need to steal. Unmovable and
- * reclaimable allocations need to actually steal pages.
+ * reclaimable allocations need to actually claim the whole block.
*/
if (order >= pageblock_order / 2 ||
start_mt == MIGRATE_RECLAIMABLE ||
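
To make the renamed heuristic concrete, here is a small userspace sketch (not part of the patch) modelling the decision described in the comment above. The migratetype constants and PAGEBLOCK_ORDER value are illustrative assumptions, and the kernel's page_group_by_mobility_disabled case is left out.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins; the real definitions live in the kernel headers. */
enum { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RECLAIMABLE };
#define PAGEBLOCK_ORDER	9	/* assumption: typical value with 4K pages */

static bool should_try_claim_block_sketch(unsigned int order, int start_mt)
{
	/* A buddy of pageblock size (or bigger) always claims the block. */
	if (order >= PAGEBLOCK_ORDER)
		return true;

	/*
	 * Relaxed check: larger requests, and any unmovable or reclaimable
	 * request, should try to claim the whole block.
	 */
	if (order >= PAGEBLOCK_ORDER / 2 ||
	    start_mt == MIGRATE_RECLAIMABLE ||
	    start_mt == MIGRATE_UNMOVABLE)
		return true;

	return false;
}

int main(void)
{
	printf("order=2 movable   -> %d\n", should_try_claim_block_sketch(2, MIGRATE_MOVABLE));
	printf("order=2 unmovable -> %d\n", should_try_claim_block_sketch(2, MIGRATE_UNMOVABLE));
	printf("order=5 movable   -> %d\n", should_try_claim_block_sketch(5, MIGRATE_MOVABLE));
	return 0;
}
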
@@ -1983,12 +1983,14 @@ static bool can_steal_fallback(unsigned int order, int start_mt)
/*
* Check whether there is a suitable fallback freepage with requested order.
- * If only_stealable is true, this function returns fallback_mt only if
- * we can steal other freepages all together. This would help to reduce
+ * Sets *claim_block to instruct the caller whether it should convert a whole
+ * pageblock to the returned migratetype.
+ * If only_claim is true, this function returns fallback_mt only if
+ * we would do this whole-block claiming. This would help to reduce
* fragmentation due to mixed migratetype pages in one pageblock.
*/
int find_suitable_fallback(struct free_area *area, unsigned int order,
- int migratetype, bool only_stealable, bool *can_steal)
+ int migratetype, bool only_claim, bool *claim_block)
{
int i;
int fallback_mt;
@@ -1996,19 +1998,16 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
if (area->nr_free == 0)
return -1;
- *can_steal = false;
+ *claim_block = false;
for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) {
fallback_mt = fallbacks[migratetype][i];
if (free_area_empty(area, fallback_mt))
continue;
- if (can_steal_fallback(order, migratetype))
- *can_steal = true;
+ if (should_try_claim_block(order, migratetype))
+ *claim_block = true;
- if (!only_stealable)
- return fallback_mt;
-
- if (*can_steal)
+ if (*claim_block || !only_claim)
return fallback_mt;
}
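
The collapsed return condition above is intended as a behavioural no-op compared with the old three-branch code. A throwaway userspace check (not part of the patch) over both booleans shows the equivalence:

#include <stdbool.h>
#include <stdio.h>

static bool old_returns_fallback(bool only_claim, bool claim_block)
{
	/* Old structure: return unless only_claim filters out non-claims. */
	if (!only_claim)
		return true;
	if (claim_block)
		return true;
	return false;
}

static bool new_returns_fallback(bool only_claim, bool claim_block)
{
	/* New single condition from the hunk above. */
	return claim_block || !only_claim;
}

int main(void)
{
	for (int oc = 0; oc <= 1; oc++)
		for (int cb = 0; cb <= 1; cb++)
			printf("only_claim=%d claim_block=%d old=%d new=%d\n",
			       oc, cb, old_returns_fallback(oc, cb),
			       new_returns_fallback(oc, cb));
	return 0;
}
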
@@ -2016,14 +2015,14 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
}
/*
- * This function implements actual steal behaviour. If order is large enough, we
- * can claim the whole pageblock for the requested migratetype. If not, we check
- * the pageblock for constituent pages; if at least half of the pages are free
- * or compatible, we can still claim the whole block, so pages freed in the
- * future will be put on the correct free list.
+ * This function implements actual block claiming behaviour. If order is large
+ * enough, we can claim the whole pageblock for the requested migratetype. If
+ * not, we check the pageblock for constituent pages; if at least half of the
+ * pages are free or compatible, we can still claim the whole block, so pages
+ * freed in the future will be put on the correct free list.
*/
static struct page *
-try_to_steal_block(struct zone *zone, struct page *page,
+try_to_claim_block(struct zone *zone, struct page *page,
int current_order, int order, int start_type,
int block_type, unsigned int alloc_flags)
{
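
The claiming rule that comment describes can be isolated as a toy model. This is not the kernel function: the threshold stands in for the free/"alike" page accounting try_to_claim_block actually performs, and PAGEBLOCK_ORDER is an assumed value.

#include <stdbool.h>
#include <stdio.h>

#define PAGEBLOCK_ORDER	9			/* assumption, as above */
#define PAGES_PER_BLOCK	(1UL << PAGEBLOCK_ORDER)

/*
 * Toy model: a buddy spanning the block is claimed outright; otherwise
 * claim only if at least half of the block's pages are free or already
 * of a compatible ("alike") migratetype.
 */
static bool claim_whole_block_sketch(int current_order,
				     unsigned long free_pages,
				     unsigned long alike_pages)
{
	if (current_order >= PAGEBLOCK_ORDER)
		return true;

	return free_pages + alike_pages >= PAGES_PER_BLOCK / 2;
}

int main(void)
{
	printf("order 9, nothing free       -> %d\n", claim_whole_block_sketch(9, 0, 0));
	printf("order 3, 100 free, 50 alike -> %d\n", claim_whole_block_sketch(3, 100, 50));
	printf("order 3, 200 free, 80 alike -> %d\n", claim_whole_block_sketch(3, 200, 80));
	return 0;
}
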
@@ -2091,11 +2090,12 @@ try_to_steal_block(struct zone *zone, struct page *page,
/*
* Try finding a free buddy page on the fallback list.
*
- * This will attempt to steal a whole pageblock for the requested type
+ * This will attempt to claim a whole pageblock for the requested type
* to ensure grouping of such requests in the future.
*
- * If a whole block cannot be stolen, regress to __rmqueue_smallest()
- * logic to at least break up as little contiguity as possible.
+ * If a whole block cannot be claimed, steal an individual page, regressing to
+ * __rmqueue_smallest() logic to at least break up as little contiguity as
+ * possible.
*
* The use of signed ints for order and current_order is a deliberate
* deviation from the rest of this file, to make the for loop
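
The two-phase behaviour this comment describes can be outlined with an invented userspace model: the names, free-list representation, and order cutoff below are made up for illustration, and ALLOC_NOFRAGMENT handling is omitted.

#include <stdbool.h>
#include <stdio.h>

/* Invented, minimal model of the two-phase fallback search; not kernel code. */
enum { MT_UNMOVABLE, MT_MOVABLE, MT_RECLAIMABLE, NR_TYPES };
#define NR_ORDERS	11
#define MIN_CLAIM_ORDER	5	/* stand-in for the claim heuristic's order cutoff */

static int free_count[NR_ORDERS][NR_TYPES];	/* free pages per order/type */

/* Return a fallback type with free pages at this order, or -1. */
static int find_fallback(int order, int start_type, bool *claim_block)
{
	*claim_block = order >= MIN_CLAIM_ORDER || start_type != MT_MOVABLE;
	for (int mt = 0; mt < NR_TYPES; mt++)
		if (mt != start_type && free_count[order][mt] > 0)
			return mt;
	return -1;
}

/* Phase 1: claim a whole block from the biggest buddy we are willing to
 * claim; phase 2: otherwise steal the smallest suitable single page. */
static int rmqueue_fallback_sketch(int order, int start_type)
{
	bool claim_block;
	int mt;

	for (int cur = NR_ORDERS - 1; cur >= order; cur--) {
		mt = find_fallback(cur, start_type, &claim_block);
		if (mt == -1)
			continue;
		if (!claim_block)
			break;
		printf("claim whole block: order %d from type %d\n", cur, mt);
		return mt;
	}

	for (int cur = order; cur < NR_ORDERS; cur++) {
		mt = find_fallback(cur, start_type, &claim_block);
		if (mt == -1)
			continue;
		printf("steal single page: order %d from type %d\n", cur, mt);
		return mt;
	}
	return -1;
}

int main(void)
{
	free_count[3][MT_RECLAIMABLE] = 4;

	/* Small movable request: claiming is futile, fall back to stealing. */
	rmqueue_fallback_sketch(2, MT_MOVABLE);

	/* Unmovable request: claim the whole block to avoid pollution. */
	rmqueue_fallback_sketch(2, MT_UNMOVABLE);
	return 0;
}
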
@@ -2112,7 +2112,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
int min_order = order;
struct page *page;
int fallback_mt;
- bool can_steal;
+ bool claim_block;
/*
* Do not steal pages from freelists belonging to other pageblocks
@@ -2131,15 +2131,15 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
--current_order) {
area = &(zone->free_area[current_order]);
fallback_mt = find_suitable_fallback(area, current_order,
- start_migratetype, false, &can_steal);
+ start_migratetype, false, &claim_block);
if (fallback_mt == -1)
continue;
- if (!can_steal)
+ if (!claim_block)
break;
page = get_page_from_free_area(area, fallback_mt);
- page = try_to_steal_block(zone, page, current_order, order,
+ page = try_to_claim_block(zone, page, current_order, order,
start_migratetype, fallback_mt,
alloc_flags);
if (page)
@@ -2149,11 +2149,11 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
if (alloc_flags & ALLOC_NOFRAGMENT)
return NULL;
- /* No luck stealing blocks. Find the smallest fallback page */
+ /* No luck claiming pageblocks. Find the smallest fallback page */
for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
area = &(zone->free_area[current_order]);
fallback_mt = find_suitable_fallback(area, current_order,
- start_migratetype, false, &can_steal);
+ start_migratetype, false, &claim_block);
if (fallback_mt == -1)
continue;