@@ -1701,6 +1701,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 	unsigned int nr_reclaimed;
 	struct page *page, *next;
 	LIST_HEAD(clean_pages);
+	unsigned int noreclaim_flag;
 
 	list_for_each_entry_safe(page, next, page_list, lru) {
 		if (!PageHuge(page) && page_is_file_lru(page) &&
@@ -1711,8 +1712,17 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 		}
 	}
 
+	/*
+	 * We should be safe here since we are only dealing with file pages and
+	 * we are not kswapd and therefore cannot write dirty file pages. But
+	 * call memalloc_noreclaim_save() anyway, just in case these conditions
+	 * change in the future.
+	 */
+	noreclaim_flag = memalloc_noreclaim_save();
 	nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
 					&stat, true);
+	memalloc_noreclaim_restore(noreclaim_flag);
+
 	list_splice(&clean_pages, page_list);
 	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
 			    -(long)nr_reclaimed);
@@ -2306,6 +2316,7 @@ unsigned long reclaim_pages(struct list_head *page_list)
 	LIST_HEAD(node_page_list);
 	struct reclaim_stat dummy_stat;
 	struct page *page;
+	unsigned int noreclaim_flag;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
 		.priority = DEF_PRIORITY,
@@ -2314,6 +2325,8 @@ unsigned long reclaim_pages(struct list_head *page_list)
 		.may_swap = 1,
 	};
 
+	noreclaim_flag = memalloc_noreclaim_save();
+
 	while (!list_empty(page_list)) {
 		page = lru_to_page(page_list);
 		if (nid == NUMA_NO_NODE) {
@@ -2350,6 +2363,8 @@ unsigned long reclaim_pages(struct list_head *page_list)
 			putback_lru_page(page);
 		}
 	}
 
+	memalloc_noreclaim_restore(noreclaim_flag);
+
 	return nr_reclaimed;
 }
Use memalloc_noreclaim_save()/memalloc_noreclaim_restore() in
reclaim_pages() to prevent the page reclaim from recursing into the
block I/O layer and deadlocking. The same pair is added to
reclaim_clean_pages_from_list() as a precaution, per the comment added
there.

Signed-off-by: Yu Zhao <yuzhao@google.com>
---
 mm/vmscan.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)
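A note for reviewers unfamiliar with the helpers: memalloc_noreclaim_save()
and memalloc_noreclaim_restore() live in include/linux/sched/mm.h and work
by latching PF_MEMALLOC in current->flags. A rough sketch of the mainline
definitions (bit manipulation spelled out for clarity):

static inline unsigned int memalloc_noreclaim_save(void)
{
	/* Save the PF_MEMALLOC bit as it was on entry... */
	unsigned int flags = current->flags & PF_MEMALLOC;

	/*
	 * ...then set it, so the page allocator treats this task as
	 * already being inside reclaim and nested allocations never
	 * enter direct reclaim again.
	 */
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	/*
	 * Restore only the saved PF_MEMALLOC bit: the flag is cleared
	 * only if it was clear on entry, which keeps nested
	 * save/restore sections safe.
	 */
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}

This is what breaks the recursion described above: the slow path in
mm/page_alloc.c bails out for PF_MEMALLOC tasks ("Avoid recursion of
direct reclaim"), so an allocation made from the block I/O path while
under this flag cannot re-enter shrink_page_list().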