@@ -509,6 +509,29 @@ static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
 	return true;
 }
 
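+/*
+ * Acquire the lru_lock of the folio's lruvec (tracking contention in
+ * MIGRATE_ASYNC mode, like compact_lock_irqsave()) and return the
+ * locked lruvec.
+ */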
+static struct lruvec *
+compact_folio_lruvec_lock_irqsave(struct folio *folio, unsigned long *flags,
+				  struct compact_control *cc)
+{
+	struct lruvec *lruvec;
+
+	lruvec = folio_lruvec(folio);
+
+	/* Track if the lock is contended in async mode */
+	if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
+		if (spin_trylock_irqsave(&lruvec->lru_lock, *flags))
+			goto out;
+
+		cc->contended = true;
+	}
+
+	spin_lock_irqsave(&lruvec->lru_lock, *flags);
+out:
+	lruvec_memcg_debug(lruvec, folio);
+
+	return lruvec;
+}
+
 /*
  * Compaction requires the taking of some coarse locks that are potentially
  * very heavily contended. The lock should be periodically unlocked to avoid
@@ -837,6 +860,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 	/* Time to isolate some pages for migration */
 	for (; low_pfn < end_pfn; low_pfn++) {
+		struct folio *folio;
 
 		if (skip_on_failure && low_pfn >= next_skip_pfn) {
 			/*
@@ -1022,18 +1046,17 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		if (!TestClearPageLRU(page))
 			goto isolate_fail_put;
 
-		lruvec = folio_lruvec(page_folio(page));
+		folio = page_folio(page);
+		lruvec = folio_lruvec(folio);
 
 		/* If we already hold the lock, we can skip some rechecking */
 		if (lruvec != locked) {
 			if (locked)
 				unlock_page_lruvec_irqrestore(locked, flags);
 
-			compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
+			lruvec = compact_folio_lruvec_lock_irqsave(folio, &flags, cc);
 			locked = lruvec;
 
-			lruvec_memcg_debug(lruvec, page_folio(page));
-
 			/* Try get exclusive access under lock */
 			if (!skip_updated) {
 				skip_updated = true;