@@ -509,6 +509,29 @@ static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
 	return true;
 }
 
+static struct lruvec *compact_lock_page_irqsave(struct page *page,
+						unsigned long *flags,
+						struct compact_control *cc)
+{
+	struct lruvec *lruvec;
+
+	lruvec = mem_cgroup_page_lruvec(page);
+
+	/* Track if the lock is contended in async mode */
+	if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
+		if (spin_trylock_irqsave(&lruvec->lru_lock, *flags))
+			goto out;
+
+		cc->contended = true;
+	}
+
+	spin_lock_irqsave(&lruvec->lru_lock, *flags);
+out:
+	lruvec_memcg_debug(lruvec, page);
+
+	return lruvec;
+}
+
 /*
  * Compaction requires the taking of some coarse locks that are potentially
  * very heavily contended. The lock should be periodically unlocked to avoid
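As an aside for readers new to this code: the new helper mirrors the idiom of compact_lock_irqsave() shown above. In MIGRATE_ASYNC mode it only trylocks and, on failure, records the contention in cc->contended so the scan can bail out early; synchronous modes simply block on the lock, and in either case the caller ends up holding it. The userspace sketch below shows the same trylock-and-record-contention pattern with pthread mutexes; every name in it is made up for illustration and is not part of the kernel patch.

#include <pthread.h>
#include <stdbool.h>

/* Made-up stand-ins for struct compact_control and MIGRATE_ASYNC. */
enum scan_mode { SCAN_ASYNC, SCAN_SYNC };

struct scan_control {
	enum scan_mode mode;
	bool contended;		/* set when an async scan found the lock busy */
};

/*
 * Opportunistic lock: an async scan tries once and remembers contention so
 * its caller can abort soon; a sync scan just waits for the lock.
 */
static void scan_lock(pthread_mutex_t *lock, struct scan_control *sc)
{
	if (sc->mode == SCAN_ASYNC && !sc->contended) {
		if (pthread_mutex_trylock(lock) == 0)
			return;

		sc->contended = true;
	}

	pthread_mutex_lock(lock);
}

In both branches the caller ends up with the lock held; the contended flag only tells it to stop scanning soon, which matches how cc->contended is consumed by compaction's async paths.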
@@ -1029,11 +1052,9 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			if (locked)
 				unlock_page_lruvec_irqrestore(locked, flags);
 
-			compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
+			lruvec = compact_lock_page_irqsave(page, &flags, cc);
 			locked = lruvec;
 
-			lruvec_memcg_debug(lruvec, page);
-
 			/* Try get exclusive access under lock */
 			if (!skip_updated) {
 				skip_updated = true;
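The caller-side change keeps the existing pattern of caching the currently locked lruvec in 'locked' and only cycling the lock when a page belongs to a different lruvec; the helper simply folds the lruvec lookup, the lock acquisition and the memcg debug check into one call. Continuing the userspace sketch after the first hunk (again, every identifier here is invented purely to illustrate the cache-the-lock-and-abort-on-contention shape, not the kernel loop), the caller side could look roughly like this:

/* Continues the scan_lock() sketch above; all names are invented. */
struct bucket {
	pthread_mutex_t lock;
	/* per-bucket lists would live here */
};

struct item {
	struct bucket *bucket;
};

static int scan_items(struct item *items, int nr, struct scan_control *sc)
{
	struct bucket *locked = NULL;	/* bucket whose lock we currently hold */
	int i, done = 0;

	for (i = 0; i < nr; i++) {
		struct bucket *bucket = items[i].bucket;

		/* Only drop and re-take the lock when the bucket changes. */
		if (bucket != locked) {
			if (locked)
				pthread_mutex_unlock(&locked->lock);

			scan_lock(&bucket->lock, sc);
			locked = bucket;
		}

		/* An async scan that hit contention gives up early. */
		if (sc->contended)
			break;

		done++;		/* "isolate" the item */
	}

	if (locked)
		pthread_mutex_unlock(&locked->lock);

	return done;
}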