@@ -357,7 +357,7 @@ extern void lru_cache_add_inactive_or_unevictable(struct page *page,
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *mask);
-extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
+extern int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
unsigned long nr_pages,
gfp_t gfp_mask,
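
The hunk above (include/linux/swap.h) renames the prototype to match its narrowed job: __isolate_lru_page_prepare() no longer takes a page reference or clears PG_lru, it only checks the page against the requested isolation mode. A minimal sketch of the calling convention the rest of the patch establishes, written as a hypothetical wrapper (try_isolate_lru_page() is mine, not part of the patch); the prepare check and the pin can come in either order, but clearing PG_lru must come last:

/* Hypothetical wrapper, for illustration only: callers now pin the page and
 * claim PG_lru themselves once the prepare check passes. */
static int try_isolate_lru_page(struct page *page, isolate_mode_t mode)
{
        /* 1. Mode/state checks only; no side effects. */
        if (__isolate_lru_page_prepare(page, mode) != 0)
                return -EBUSY;

        /* 2. Pin the page so it cannot be freed while we work on it. */
        if (unlikely(!get_page_unless_zero(page)))
                return -EBUSY;

        /* 3. Claim the page: only one racing isolator can clear PG_lru. */
        if (!TestClearPageLRU(page)) {
                put_page(page);
                return -EBUSY;
        }

        return 0;       /* caller owns a reference and the LRU claim */
}
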
@@ -887,6 +887,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) {
if (!cc->ignore_skip_hint && get_pageblock_skip(page)) {
low_pfn = end_pfn;
+ page = NULL;
goto isolate_abort;
}
valid_page = page;
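
In this skip-hint branch of isolate_migratepages_block() (mm/compaction.c) the page has not been pinned yet, so the local pointer is cleared before jumping to isolate_abort: a later hunk teaches the abort path to SetPageLRU()/put_page() whatever page is still non-NULL, and that cleanup must not run for a page this code never claimed. A tiny, self-contained illustration of the idiom (plain C, hypothetical names):

#include <stdbool.h>

struct res { bool claimed; };

/* Clear the local before jumping to a cleanup label that undoes a claim. */
static int scan_one(struct res *candidate, bool skip)
{
        struct res *r = candidate;

        if (skip) {
                r = NULL;               /* nothing claimed: cleanup must not touch it */
                goto abort;
        }

        r->claimed = true;              /* from here on, abort has work to undo */

abort:
        if (r)
                r->claimed = false;     /* undo only what was actually claimed */
        return 0;
}
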
@@ -968,6 +969,21 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
goto isolate_fail;
+ /*
+ * Be careful not to clear PageLRU until after we're
+ * sure the page is not being freed elsewhere -- the
+ * page release code relies on it.
+ */
+ if (unlikely(!get_page_unless_zero(page)))
+ goto isolate_fail;
+
+ if (__isolate_lru_page_prepare(page, isolate_mode) != 0)
+ goto isolate_fail_put;
+
+ /* Try to isolate the page */
+ if (!TestClearPageLRU(page))
+ goto isolate_fail_put;
+
/* If we already hold the lock, we can skip some rechecking */
if (!locked) {
locked = compact_lock_irqsave(&pgdat->lru_lock,
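
This is the core of the change: the page is pinned with get_page_unless_zero() and claimed with TestClearPageLRU() before pgdat->lru_lock is taken, so isolation racing with another isolator, or with the page being freed, resolves without the lock: the reference keeps the page alive and at most one path sees PG_lru set. A self-contained userspace model of the pin-then-claim protocol in C11 atomics (fake_page, pin_page() and claim_lru() are stand-ins I made up, not kernel interfaces):

#include <stdatomic.h>
#include <stdbool.h>

struct fake_page {
        atomic_int  refcount;   /* models page->_refcount */
        atomic_bool lru;        /* models the PG_lru bit */
};

/* Like get_page_unless_zero(): take a reference only if one still exists. */
static bool pin_page(struct fake_page *p)
{
        int old = atomic_load(&p->refcount);

        while (old != 0) {
                if (atomic_compare_exchange_weak(&p->refcount, &old, old + 1))
                        return true;
        }
        return false;   /* refcount already hit zero: page is being freed */
}

/* Like TestClearPageLRU(): atomically claim the LRU bit. */
static bool claim_lru(struct fake_page *p)
{
        return atomic_exchange(&p->lru, false);
}

/* Only one of any number of racing callers returns true for a given page. */
bool isolate(struct fake_page *p)
{
        if (!pin_page(p))
                return false;
        if (!claim_lru(p)) {
                atomic_fetch_sub(&p->refcount, 1);      /* simplified put_page() */
                return false;
        }
        return true;    /* this caller now owns the isolation */
}
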
@@ -980,10 +996,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
goto isolate_abort;
}
- /* Recheck PageLRU and PageCompound under lock */
- if (!PageLRU(page))
- goto isolate_fail;
-
/*
* Page become compound since the non-locked check,
* and it's on LRU. It can only be a THP so the order
@@ -991,16 +1003,13 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
*/
if (unlikely(PageCompound(page) && !cc->alloc_contig)) {
low_pfn += compound_nr(page) - 1;
- goto isolate_fail;
+ SetPageLRU(page);
+ goto isolate_fail_put;
}
}
lruvec = mem_cgroup_page_lruvec(page, pgdat);
- /* Try isolate the page */
- if (__isolate_lru_page(page, isolate_mode) != 0)
- goto isolate_fail;
-
/* The whole page is taken off the LRU; skip the tail pages. */
if (PageCompound(page))
low_pfn += compound_nr(page) - 1;
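
Since PG_lru is now cleared before the lock is taken, a failure detected afterwards has to undo that claim before the pinned reference is dropped; that is why the compound-page bail-out gains a SetPageLRU() in front of the new isolate_fail_put, and why the old PageLRU()/__isolate_lru_page() rechecks under the lock can simply go away. The unwind mirrors the claim in reverse order, roughly (illustrative helper, not in the patch):

/* Illustrative only: undo a fully claimed page in the reverse order it was
 * taken -- restore PG_lru first, then drop the reference that pinned it. */
static void unwind_claimed_page(struct page *page)
{
        SetPageLRU(page);       /* hand the page back to the LRU ... */
        put_page(page);         /* ... then release our pin */
}
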
@@ -1029,6 +1038,15 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
}
continue;
+
+isolate_fail_put:
+ /* Avoid potential deadlock in freeing page under lru_lock */
+ if (locked) {
+ spin_unlock_irqrestore(&pgdat->lru_lock, flags);
+ locked = false;
+ }
+ put_page(page);
+
isolate_fail:
if (!skip_on_failure)
continue;
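
The new isolate_fail_put label drops pgdat->lru_lock before calling put_page(). If that put turned out to be the final reference, the release path (freeing a compound page, for instance) may itself take lru_lock, so putting the page while still holding the lock could self-deadlock, which is what the added comment warns about. A small userspace sketch of the same rule, drop the lock before the last put when the destructor takes that lock (hypothetical names, a pthread mutex standing in for the spinlock):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

struct object { int refs; };

/* The destructor takes list_lock, like the page release code touching LRU
 * state; a non-recursive lock self-deadlocks if the caller still holds it. */
static void destroy_object(struct object *obj)
{
        pthread_mutex_lock(&list_lock);
        /* ... unlink obj from the shared list ... */
        pthread_mutex_unlock(&list_lock);
}

static void put_object(struct object *obj)
{
        if (--obj->refs == 0)
                destroy_object(obj);
}

/* Failure path: release the lock first, then drop our reference. */
void fail_path(struct object *obj, bool locked)
{
        if (locked)
                pthread_mutex_unlock(&list_lock);
        put_object(obj);
}
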
@@ -1065,9 +1083,15 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
if (unlikely(low_pfn > end_pfn))
low_pfn = end_pfn;
+ page = NULL;
+
isolate_abort:
if (locked)
spin_unlock_irqrestore(&pgdat->lru_lock, flags);
+ if (page) {
+ SetPageLRU(page);
+ put_page(page);
+ }
/*
* Updated the cached scanner pfn once the pageblock has been scanned
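
At isolate_abort the local page now doubles as ownership state: it is non-NULL only in the window where the page has been pinned and PG_lru cleared but the page has not yet been queued on cc->migratepages; the two new page = NULL assignments mark the points where nothing is held. Phrased as a hypothetical debugging aid (illustration only, not part of the patch), the invariant the abort epilogue relies on is:

/* Hypothetical check, illustration only: what could be asserted at
 * isolate_abort once this patch is applied. */
static void check_abort_state(struct page *page)
{
        if (page) {
                /* pinned by get_page_unless_zero() ... */
                VM_BUG_ON_PAGE(page_count(page) == 0, page);
                /* ... and claimed: PG_lru was cleared by TestClearPageLRU(). */
                VM_BUG_ON_PAGE(PageLRU(page), page);
        }
}
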
@@ -1538,7 +1538,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
*
* returns 0 on success, -ve errno on failure.
*/
-int __isolate_lru_page(struct page *page, isolate_mode_t mode)
+int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
{
int ret = -EBUSY;
@@ -1590,22 +1590,9 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
return ret;
- if (likely(get_page_unless_zero(page))) {
- /*
- * Be careful not to clear PageLRU until after we're
- * sure the page is not being freed elsewhere -- the
- * page release code relies on it.
- */
- if (TestClearPageLRU(page))
- ret = 0;
- else
- put_page(page);
- }
-
- return ret;
+ return 0;
}
-
/*
* Update LRU sizes after isolating pages. The LRU size updates must
* be complete before mem_cgroup_update_lru_size due to a sanity check.
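
With the reference and PG_lru handling moved to the callers, __isolate_lru_page_prepare() in mm/vmscan.c becomes a pure eligibility check: the earlier, untouched mode tests still return -EBUSY for pages the caller asked to skip, and falling through to the end now just means "eligible". Condensed shape of the resulting function (illustrative; the unchanged checks are elided):

int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
{
        int ret = -EBUSY;

        /* ... unchanged checks above this hunk: PageLRU(), PageUnevictable()
         * vs ISOLATE_UNEVICTABLE, writeback/dirty handling for
         * ISOLATE_ASYNC_MIGRATE, ... each returning ret when they refuse ... */

        if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
                return ret;

        return 0;       /* eligible; the caller still has to pin and claim it */
}
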
@@ -1685,20 +1672,34 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
* only when the page is being freed somewhere else.
*/
scan += nr_pages;
- switch (__isolate_lru_page(page, mode)) {
+ switch (__isolate_lru_page_prepare(page, mode)) {
case 0:
+ /*
+ * Be careful not to clear PageLRU until after we're
+ * sure the page is not being freed elsewhere -- the
+ * page release code relies on it.
+ */
+ if (unlikely(!get_page_unless_zero(page)))
+ goto busy;
+
+ if (!TestClearPageLRU(page)) {
+ /*
+ * The page may already be in another isolation path,
+ * but we still hold lru_lock.
+ */
+ put_page(page);
+ goto busy;
+ }
+
nr_taken += nr_pages;
nr_zone_taken[page_zonenum(page)] += nr_pages;
list_move(&page->lru, dst);
break;
- case -EBUSY:
+ default:
+busy:
/* else it is being freed elsewhere */
list_move(&page->lru, src);
- continue;
-
- default:
- BUG();
}
}
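
In the reclaim path the same pin-then-claim sequence now runs inside the case 0: arm of isolate_lru_pages(), still under lru_lock, and every failure funnels into the shared busy label, which simply leaves the page on the source list; the old BUG() for unexpected return values goes away because the prepare step can only return 0 or -EBUSY. Unlike the compaction path, put_page() is called here without dropping lru_lock; as I read it that is safe because whichever path won the TestClearPageLRU() race took its own reference first, so this put cannot be the one that frees the page. A condensed view of the reworked loop body with those points spelled out (illustrative, not a literal copy):

        switch (__isolate_lru_page_prepare(page, mode)) {
        case 0:
                if (unlikely(!get_page_unless_zero(page)))
                        goto busy;      /* already headed back to the allocator */

                if (!TestClearPageLRU(page)) {
                        /* Another isolation path claimed the page; it holds
                         * its own reference, so this put cannot be the final
                         * one even though lru_lock is still held. */
                        put_page(page);
                        goto busy;
                }

                nr_taken += nr_pages;
                nr_zone_taken[page_zonenum(page)] += nr_pages;
                list_move(&page->lru, dst);
                break;

        default:                        /* -EBUSY from the prepare checks */
busy:
                /* keep the page where it was on the source list */
                list_move(&page->lru, src);
        }
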