@@ -404,21 +404,6 @@ static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
return true;
}
-/*
- * Aside from avoiding lock contention, compaction also periodically checks
- * need_resched() and records async compaction as contended if necessary.
- */
-static inline void compact_check_resched(struct compact_control *cc)
-{
- /* async compaction aborts if contended */
- if (need_resched()) {
- if (cc->mode == MIGRATE_ASYNC)
- cc->contended = true;
-
- cond_resched();
- }
-}
-
/*
* Compaction requires the taking of some coarse locks that are potentially
* very heavily contended. The lock should be periodically unlocked to avoid
@@ -447,7 +432,7 @@ static bool compact_unlock_should_abort(spinlock_t *lock,
return true;
}
- compact_check_resched(cc);
+ cond_resched();
return false;
}
@@ -736,7 +721,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
return 0;
}
- compact_check_resched(cc);
+ cond_resched();
if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
skip_on_failure = true;
@@ -1370,7 +1355,7 @@ static void isolate_freepages(struct compact_control *cc)
* suitable migration targets, so periodically check resched.
*/
if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
- compact_check_resched(cc);
+ cond_resched();
page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
zone);
@@ -1664,7 +1649,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
* need to schedule.
*/
if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
- compact_check_resched(cc);
+ cond_resched();
page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
zone);
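
Illustrative sketch of the behavioural change at each converted call site (this
mirrors the helper removed above and is not part of the patch itself): the old
compact_check_resched() also flagged async compaction as contended whenever a
reschedule was pending, whereas the plain cond_resched() that replaces it only
yields the CPU.

	/* Before: helper marked async compaction as contended, then yielded. */
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC)
			cc->contended = true;	/* later causes async compaction to abort */

		cond_resched();
	}

	/* After: only yield the CPU if a reschedule is due; no contention marking. */
	cond_resched();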