diff --git a/mm/gup.c b/mm/gup.c
@@ -200,6 +200,19 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
 	if (!put_devmap_managed_page_refs(&folio->page, refs))
 		folio_put_refs(folio, refs);
+
+	if (folio_test_unevictable(folio) && folio_evictable(folio)) {
+		struct lruvec *lruvec = folio_lruvec_lock_irq(folio);
+
+		lruvec_del_folio(lruvec, folio);
+		folio_clear_unevictable(folio);
+		lruvec_add_folio(lruvec, folio);
+		folio_set_lru(folio);
+		__count_vm_events(UNEVICTABLE_PGRESCUED,
+				  folio_nr_pages(folio));
+
+		unlock_page_lruvec_irq(lruvec);
+	}
 }
 
 /**
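
The hunk above is the "rescue" half of the scheme: when the last pin reference is dropped, a folio that reclaim parked on the unevictable list is moved back to the regular LRU (mirroring the pattern used by check_move_unevictable_folios() in mm/vmscan.c). For readers unfamiliar with the GUP side, here is a minimal sketch of a caller that exercises this path; pin_user_pages_fast() and unpin_user_pages() are the real GUP entry points, while the driver-style wrapper itself is hypothetical:

	#include <linux/mm.h>

	/* Hypothetical driver helper: pin user memory for DMA, then release it. */
	static int demo_pin_for_dma(unsigned long addr, int nr_pages,
				    struct page **pages)
	{
		/*
		 * Each page gains FOLL_PIN references; with this patch,
		 * reclaim that scans these folios parks them on the
		 * unevictable list instead of rescanning them.
		 */
		int pinned = pin_user_pages_fast(addr, nr_pages, FOLL_WRITE, pages);

		if (pinned <= 0)
			return pinned;

		/* ... program the DMA engine against *pages ... */

		/*
		 * Dropping the last pin reaches gup_put_folio(), which moves
		 * any folio left on the unevictable list back to the LRU.
		 */
		unpin_user_pages(pages, pinned);
		return pinned;
	}
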
diff --git a/mm/internal.h b/mm/internal.h
@@ -154,7 +154,7 @@ static inline bool folio_evictable(struct folio *folio)
 	/* Prevent address_space of inode and swap cache from being freed */
 	rcu_read_lock();
 	ret = !mapping_unevictable(folio_mapping(folio)) &&
-			!folio_test_mlocked(folio);
+			!folio_test_mlocked(folio) && !folio_maybe_dma_pinned(folio);
 	rcu_read_unlock();
 	return ret;
 }
To avoid meaningless scans during memory reclaim, move pinned pages to the
unevictable list. Update folio_evictable() to check whether the folio is
pinned; if it is, the function returns false. The vmscan code then puts the
folio on the unevictable list when it scans the folio, so each pinned page is
scanned at most once. When the folio is unpinned, call folio_evictable() to
check whether the folio can be put back on the inactive/active list.

Signed-off-by: Jinjiang Tu <tujinjiang@huawei.com>
---
 mm/gup.c      | 13 +++++++++++++
 mm/internal.h |  2 +-
 2 files changed, 14 insertions(+), 1 deletion(-)
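
For reference, the pin check that folio_evictable() now relies on is
folio_maybe_dma_pinned(). A simplified sketch of its logic, based on
include/linux/mm.h (the exact definition varies by kernel version):

	/*
	 * Simplified sketch, not the authoritative definition. FOLL_PIN
	 * acquisitions bias the refcount of small folios by
	 * GUP_PIN_COUNTING_BIAS (1024), so a large refcount "maybe"
	 * indicates a DMA pin; large folios keep an exact _pincount.
	 */
	static inline bool folio_maybe_dma_pinned(struct folio *folio)
	{
		if (folio_test_large(folio))
			return atomic_read(&folio->_pincount) > 0;

		/*
		 * False positives are possible: 1024 ordinary references
		 * look the same as one pin. That is acceptable here, where
		 * the only cost is leaving a folio on the unevictable list
		 * until its references drop.
		 */
		return (unsigned int)folio_ref_count(folio) >=
			GUP_PIN_COUNTING_BIAS;
	}

Because the check is a heuristic, a heavily referenced but unpinned folio may
occasionally be treated as pinned; such folios are still rescued by the
gup_put_folio() hunk above once their reference count falls.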