@@ -4249,24 +4249,24 @@ int page_evictable(struct page *page)
  */
 void check_move_unevictable_pages(struct pagevec *pvec)
 {
-	struct lruvec *lruvec;
-	struct pglist_data *pgdat = NULL;
+	struct lruvec *locked_lruvec = NULL;
 	int pgscanned = 0;
 	int pgrescued = 0;
 	int i;
 
 	for (i = 0; i < pvec->nr; i++) {
 		struct page *page = pvec->pages[i];
-		struct pglist_data *pagepgdat = page_pgdat(page);
+		struct pglist_data *pgdat = page_pgdat(page);
+		struct lruvec *lruvec = mem_cgroup_page_lruvec(page, pgdat);
 
 		pgscanned++;
-		if (pagepgdat != pgdat) {
-			if (pgdat)
-				spin_unlock_irq(&pgdat->lruvec.lru_lock);
-			pgdat = pagepgdat;
-			spin_lock_irq(&pgdat->lruvec.lru_lock);
+		if (lruvec != locked_lruvec) {
+			if (locked_lruvec)
+				spin_unlock_irq(&locked_lruvec->lru_lock);
+			locked_lruvec = lruvec;
+			spin_lock_irq(&lruvec->lru_lock);
+			sync_lruvec_pgdat(lruvec, pgdat);
 		}
-		lruvec = mem_cgroup_page_lruvec(page, pgdat);
 
 		if (!PageLRU(page) || !PageUnevictable(page))
 			continue;
@@ -4282,10 +4282,10 @@ void check_move_unevictable_pages(struct pagevec *pvec)
 		}
 	}
 
-	if (pgdat) {
+	if (locked_lruvec) {
 		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
 		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
-		spin_unlock_irq(&pgdat->lruvec.lru_lock);
+		spin_unlock_irq(&locked_lruvec->lru_lock);
 	}
 }
 EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
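
The pattern these hunks introduce — remember which lruvec's lock is currently held in locked_lruvec and only drop/retake the lock when the next page maps to a different lruvec — is the same batching trick the old code played per pgdat. As a rough, kernel-free sketch of that idea (bucket, item, process_items and the pthread mutexes are illustrative stand-ins of mine, not anything from this patch):

/* Sketch of the lock-batching pattern above: keep the lock of the
 * current "bucket" (standing in for a lruvec) held across consecutive
 * items, and only unlock/relock when an item belongs to a different
 * bucket. Names are illustrative, not kernel API.
 */
#include <pthread.h>
#include <stdio.h>

struct bucket {
	pthread_mutex_t lock;
	int count;
};

struct item {
	struct bucket *home;	/* which bucket this item belongs to */
};

static void process_items(struct item *items, int nr)
{
	struct bucket *locked_bucket = NULL;	/* like locked_lruvec */
	int i;

	for (i = 0; i < nr; i++) {
		struct bucket *bucket = items[i].home;

		/* Only cycle the lock when the bucket changes. */
		if (bucket != locked_bucket) {
			if (locked_bucket)
				pthread_mutex_unlock(&locked_bucket->lock);
			locked_bucket = bucket;
			pthread_mutex_lock(&bucket->lock);
		}
		bucket->count++;	/* work done under bucket->lock */
	}

	if (locked_bucket)
		pthread_mutex_unlock(&locked_bucket->lock);
}

int main(void)
{
	struct bucket a = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct bucket b = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct item items[] = { { &a }, { &a }, { &b }, { &a } };

	process_items(items, 4);
	printf("a=%d b=%d\n", a.count, b.count);
	return 0;
}

Built with -pthread this should print a=3 b=1; the point is only that consecutive items in the same bucket reuse the already-held lock instead of paying an unlock/lock pair per item.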
to replace per pgdat lru_lock.

Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Kirill Tkhai <ktkhai@virtuozzo.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Yafang Shao <laoar.shao@gmail.com>
Cc: Yang Shi <yang.shi@linux.alibaba.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: cgroups@vger.kernel.org
Cc: linux-mm@kvack.org
Cc: linux-kernel@vger.kernel.org
---
 mm/vmscan.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)