Message ID | 20190603210746.15800-10-hannes@cmpxchg.org (mailing list archive)
---|---
State | New, archived
Series | mm: fix page aging across multiple cgroups
On Mon, Jun 3, 2019 at 3:08 PM Johannes Weiner <hannes@cmpxchg.org> wrote:
>
> When file pages are lower than the watermark on a node, we try to
> force scan anonymous pages to counteract the balancing algorithm's
> preference for new file pages when they are likely thrashing. This is
> a node-level decision, but it's currently made each time we look at
> an lruvec. This is unnecessarily expensive and also a layering
> violation that makes the code harder to understand.
>
> Clean this up by making the check once per node and setting a flag in
> the scan_control.
>
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>

Reviewed-by: Shakeel Butt <shakeelb@google.com>

> ---
>  mm/vmscan.c | 80 ++++++++++++++++++++++++++++-------------------------
>  1 file changed, 42 insertions(+), 38 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index eb535c572733..cabf94dfa92d 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -104,6 +104,9 @@ struct scan_control {
>         /* One of the zones is ready for compaction */
>         unsigned int compaction_ready:1;
>
> +       /* The file pages on the current node are dangerously low */
> +       unsigned int file_is_tiny:1;
> +
>         /* Allocation order */
>         s8 order;
>
> @@ -2219,45 +2222,16 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
>         }
>

Unrelated to the patch. I think we need to revisit all the heuristics
that were added here over the years. get_scan_count() has become
really complicated and weird.

>         /*
> -        * Prevent the reclaimer from falling into the cache trap: as
> -        * cache pages start out inactive, every cache fault will tip
> -        * the scan balance towards the file LRU. And as the file LRU
> -        * shrinks, so does the window for rotation from references.
> -        * This means we have a runaway feedback loop where a tiny
> -        * thrashing file LRU becomes infinitely more attractive than
> -        * anon pages. Try to detect this based on file LRU size.
> +        * If the system is almost out of file pages, force-scan anon.
> +        * But only if there are enough inactive anonymous pages on
> +        * the LRU. Otherwise, the small LRU gets thrashed.
>          */
> -       if (!cgroup_reclaim(sc)) {
> -               unsigned long pgdatfile;
> -               unsigned long pgdatfree;
> -               int z;
> -               unsigned long total_high_wmark = 0;
> -
> -               pgdatfree = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
> -               pgdatfile = node_page_state(pgdat, NR_ACTIVE_FILE) +
> -                           node_page_state(pgdat, NR_INACTIVE_FILE);
> -
> -               for (z = 0; z < MAX_NR_ZONES; z++) {
> -                       struct zone *zone = &pgdat->node_zones[z];
> -                       if (!managed_zone(zone))
> -                               continue;
> -
> -                       total_high_wmark += high_wmark_pages(zone);
> -               }
> -
> -               if (unlikely(pgdatfile + pgdatfree <= total_high_wmark)) {
> -                       /*
> -                        * Force SCAN_ANON if there are enough inactive
> -                        * anonymous pages on the LRU in eligible zones.
> -                        * Otherwise, the small LRU gets thrashed.
> -                        */
> -                       if (!inactive_list_is_low(lruvec, false, sc, false) &&
> -                           lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, sc->reclaim_idx)
> -                                       >> sc->priority) {
> -                               scan_balance = SCAN_ANON;
> -                               goto out;
> -                       }
> -               }
> +       if (sc->file_is_tiny &&
> +           !inactive_list_is_low(lruvec, false, sc, false) &&
> +           lruvec_lru_size(lruvec, LRU_INACTIVE_ANON,
> +                           sc->reclaim_idx) >> sc->priority) {
> +               scan_balance = SCAN_ANON;
> +               goto out;
>         }
>
>         /*
> @@ -2718,6 +2692,36 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
>         nr_reclaimed = sc->nr_reclaimed;
>         nr_scanned = sc->nr_scanned;
>
> +       /*
> +        * Prevent the reclaimer from falling into the cache trap: as
> +        * cache pages start out inactive, every cache fault will tip
> +        * the scan balance towards the file LRU. And as the file LRU
> +        * shrinks, so does the window for rotation from references.
> +        * This means we have a runaway feedback loop where a tiny
> +        * thrashing file LRU becomes infinitely more attractive than
> +        * anon pages. Try to detect this based on file LRU size.
> +        */
> +       if (!cgroup_reclaim(sc)) {
> +               unsigned long file;
> +               unsigned long free;
> +               int z;
> +               unsigned long total_high_wmark = 0;
> +
> +               free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
> +               file = node_page_state(pgdat, NR_ACTIVE_FILE) +
> +                      node_page_state(pgdat, NR_INACTIVE_FILE);
> +
> +               for (z = 0; z < MAX_NR_ZONES; z++) {
> +                       struct zone *zone = &pgdat->node_zones[z];
> +                       if (!managed_zone(zone))
> +                               continue;
> +
> +                       total_high_wmark += high_wmark_pages(zone);
> +               }
> +
> +               sc->file_is_tiny = file + free <= total_high_wmark;
> +       }
> +
>         shrink_node_memcgs(pgdat, sc);
>
>         if (reclaim_state) {
> --
> 2.21.0
>
diff --git a/mm/vmscan.c b/mm/vmscan.c
index eb535c572733..cabf94dfa92d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -104,6 +104,9 @@ struct scan_control {
 	/* One of the zones is ready for compaction */
 	unsigned int compaction_ready:1;
 
+	/* The file pages on the current node are dangerously low */
+	unsigned int file_is_tiny:1;
+
 	/* Allocation order */
 	s8 order;
 
@@ -2219,45 +2222,16 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 	}
 
 	/*
-	 * Prevent the reclaimer from falling into the cache trap: as
-	 * cache pages start out inactive, every cache fault will tip
-	 * the scan balance towards the file LRU. And as the file LRU
-	 * shrinks, so does the window for rotation from references.
-	 * This means we have a runaway feedback loop where a tiny
-	 * thrashing file LRU becomes infinitely more attractive than
-	 * anon pages. Try to detect this based on file LRU size.
+	 * If the system is almost out of file pages, force-scan anon.
+	 * But only if there are enough inactive anonymous pages on
+	 * the LRU. Otherwise, the small LRU gets thrashed.
 	 */
-	if (!cgroup_reclaim(sc)) {
-		unsigned long pgdatfile;
-		unsigned long pgdatfree;
-		int z;
-		unsigned long total_high_wmark = 0;
-
-		pgdatfree = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
-		pgdatfile = node_page_state(pgdat, NR_ACTIVE_FILE) +
-			    node_page_state(pgdat, NR_INACTIVE_FILE);
-
-		for (z = 0; z < MAX_NR_ZONES; z++) {
-			struct zone *zone = &pgdat->node_zones[z];
-			if (!managed_zone(zone))
-				continue;
-
-			total_high_wmark += high_wmark_pages(zone);
-		}
-
-		if (unlikely(pgdatfile + pgdatfree <= total_high_wmark)) {
-			/*
-			 * Force SCAN_ANON if there are enough inactive
-			 * anonymous pages on the LRU in eligible zones.
-			 * Otherwise, the small LRU gets thrashed.
-			 */
-			if (!inactive_list_is_low(lruvec, false, sc, false) &&
-			    lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, sc->reclaim_idx)
-					>> sc->priority) {
-				scan_balance = SCAN_ANON;
-				goto out;
-			}
-		}
+	if (sc->file_is_tiny &&
+	    !inactive_list_is_low(lruvec, false, sc, false) &&
+	    lruvec_lru_size(lruvec, LRU_INACTIVE_ANON,
+			    sc->reclaim_idx) >> sc->priority) {
+		scan_balance = SCAN_ANON;
+		goto out;
 	}
 
 	/*
@@ -2718,6 +2692,36 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 	nr_reclaimed = sc->nr_reclaimed;
 	nr_scanned = sc->nr_scanned;
 
+	/*
+	 * Prevent the reclaimer from falling into the cache trap: as
+	 * cache pages start out inactive, every cache fault will tip
+	 * the scan balance towards the file LRU. And as the file LRU
+	 * shrinks, so does the window for rotation from references.
+	 * This means we have a runaway feedback loop where a tiny
+	 * thrashing file LRU becomes infinitely more attractive than
+	 * anon pages. Try to detect this based on file LRU size.
+	 */
+	if (!cgroup_reclaim(sc)) {
+		unsigned long file;
+		unsigned long free;
+		int z;
+		unsigned long total_high_wmark = 0;
+
+		free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
+		file = node_page_state(pgdat, NR_ACTIVE_FILE) +
+		       node_page_state(pgdat, NR_INACTIVE_FILE);
+
+		for (z = 0; z < MAX_NR_ZONES; z++) {
+			struct zone *zone = &pgdat->node_zones[z];
+			if (!managed_zone(zone))
+				continue;
+
+			total_high_wmark += high_wmark_pages(zone);
+		}
+
+		sc->file_is_tiny = file + free <= total_high_wmark;
+	}
+
 	shrink_node_memcgs(pgdat, sc);
 
 	if (reclaim_state) {
When file pages are lower than the watermark on a node, we try to
force scan anonymous pages to counteract the balancing algorithm's
preference for new file pages when they are likely thrashing. This is
a node-level decision, but it's currently made each time we look at
an lruvec. This is unnecessarily expensive and also a layering
violation that makes the code harder to understand.

Clean this up by making the check once per node and setting a flag in
the scan_control.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
---
 mm/vmscan.c | 80 ++++++++++++++++++++++++++++-------------------------
 1 file changed, 42 insertions(+), 38 deletions(-)
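For readers skimming the thread, the sketch below is a small, self-contained userspace model of the reshuffled logic: the node-wide "file pages are dangerously low" test is computed once (as shrink_node() does after the patch) and the per-lruvec scan balancing merely reads the cached bit (as get_scan_count() does). The helper names check_file_is_tiny() and force_scan_anon(), and the page counts in main(), are invented for illustration only; the authoritative code is the diff above, which additionally involves inactive_list_is_low(), zone eligibility, and the priority shift.

```c
/* Illustrative model only -- not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct scan_control {
	unsigned int file_is_tiny:1;	/* set once per node per reclaim pass */
};

/* Done once per node, analogous to the new block in shrink_node(). */
static void check_file_is_tiny(struct scan_control *sc,
			       unsigned long file_pages,
			       unsigned long free_pages,
			       unsigned long total_high_wmark)
{
	sc->file_is_tiny = file_pages + free_pages <= total_high_wmark;
}

/* Done per lruvec, analogous to the simplified test in get_scan_count(). */
static bool force_scan_anon(const struct scan_control *sc,
			    bool inactive_anon_is_low,
			    unsigned long inactive_anon_size,
			    int priority)
{
	return sc->file_is_tiny &&
	       !inactive_anon_is_low &&
	       (inactive_anon_size >> priority) != 0;
}

int main(void)
{
	struct scan_control sc = { 0 };

	/* Hypothetical node: 2048 file + 1024 free pages vs. a 4096-page
	 * sum of high watermarks -> the node is short on file pages. */
	check_file_is_tiny(&sc, 2048, 1024, 4096);

	printf("file_is_tiny=%u force_anon=%d\n",
	       (unsigned int)sc.file_is_tiny,
	       force_scan_anon(&sc, false, 1UL << 16, 4));
	return 0;
}
```

The point of the patch, mirrored here, is that the node-wide accounting (the free/file sums and the zone watermark loop) runs once per shrink_node() pass instead of being repeated for every lruvec, which matters when many memory cgroups are scanned.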