| Message ID | 20210618061537.434999-8-ying.huang@intel.com (mailing list archive) |
|---|---|
| State | New, archived |
| Series | Migrate Pages in lieu of discard |
On 18 Jun 2021, at 2:15, Huang Ying wrote:

> From: Dave Hansen <dave.hansen@linux.intel.com>
>
> Anonymous pages are kept on their own LRU(s). These lists could
> theoretically always be scanned and maintained. But, without swap,
> there is currently nothing the kernel can *do* with the results of a
> scanned, sorted LRU for anonymous pages.
>
> A check for '!total_swap_pages' currently serves as a valid check as
> to whether anonymous LRUs should be maintained. However, another
> method will be added shortly: page demotion.
>
> Abstract out the 'total_swap_pages' checks into a helper, give it a
> logically significant name, and check for the possibility of page
> demotion.
>
> Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
> Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
> Reviewed-by: Yang Shi <shy828301@gmail.com>
> Reviewed-by: Greg Thelen <gthelen@google.com>
> Cc: Michal Hocko <mhocko@suse.com>
> Cc: Wei Xu <weixugc@google.com>
> Cc: David Rientjes <rientjes@google.com>
> Cc: Dan Williams <dan.j.williams@intel.com>
> Cc: David Hildenbrand <david@redhat.com>
> Cc: osalvador <osalvador@suse.de>
> ---
>  mm/vmscan.c | 28 +++++++++++++++++++++++++---
>  1 file changed, 25 insertions(+), 3 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 7d5c7216a4b7..8654cec65522 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -2706,6 +2706,26 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
>  	}
>  }
>
> +/*
> + * Anonymous LRU management is a waste if there is
> + * ultimately no way to reclaim the memory.
> + */
> +bool anon_should_be_aged(struct lruvec *lruvec)
> +{
> +	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
> +
> +	/* Aging the anon LRU is valuable if swap is present: */
> +	if (total_swap_pages > 0)
> +		return true;
> +
> +	/* Also valuable if anon pages can be demoted: */
> +	if (next_demotion_node(pgdat->node_id) >= 0)

!= NUMA_NO_NODE might be better, even though we know NUMA_NO_NODE
is currently set to -1.

> +		return true;
> +
> +	/* No way to reclaim anon pages. Should not age anon LRUs: */
> +	return false;
> +}
> +
>  static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
>  {
>  	unsigned long nr[NR_LRU_LISTS];
> @@ -2815,7 +2835,8 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
>  	 * Even if we did not try to evict anon pages at all, we want to
>  	 * rebalance the anon lru active/inactive ratio.
>  	 */
> -	if (total_swap_pages && inactive_is_low(lruvec, LRU_INACTIVE_ANON))
> +	if (anon_should_be_aged(lruvec) &&
> +	    inactive_is_low(lruvec, LRU_INACTIVE_ANON))
>  		shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
>  				   sc, LRU_ACTIVE_ANON);
>  }
> @@ -3644,10 +3665,11 @@ static void age_active_anon(struct pglist_data *pgdat,
>  	struct mem_cgroup *memcg;
>  	struct lruvec *lruvec;
>
> -	if (!total_swap_pages)
> +	lruvec = mem_cgroup_lruvec(NULL, pgdat);
> +
> +	if (!anon_should_be_aged(lruvec))
>  		return;
>
> -	lruvec = mem_cgroup_lruvec(NULL, pgdat);
>  	if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON))
>  		return;
>
> --
> 2.30.2

—
Best Regards,
Yan, Zi
Zi Yan <ziy@nvidia.com> writes:

> On 18 Jun 2021, at 2:15, Huang Ying wrote:
>
>> From: Dave Hansen <dave.hansen@linux.intel.com>
>>
>> Anonymous pages are kept on their own LRU(s). These lists could
>> theoretically always be scanned and maintained. But, without swap,
>> there is currently nothing the kernel can *do* with the results of a
>> scanned, sorted LRU for anonymous pages.
>>
>> A check for '!total_swap_pages' currently serves as a valid check as
>> to whether anonymous LRUs should be maintained. However, another
>> method will be added shortly: page demotion.
>>
>> Abstract out the 'total_swap_pages' checks into a helper, give it a
>> logically significant name, and check for the possibility of page
>> demotion.
>>
>> Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
>> Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
>> Reviewed-by: Yang Shi <shy828301@gmail.com>
>> Reviewed-by: Greg Thelen <gthelen@google.com>
>> Cc: Michal Hocko <mhocko@suse.com>
>> Cc: Wei Xu <weixugc@google.com>
>> Cc: David Rientjes <rientjes@google.com>
>> Cc: Dan Williams <dan.j.williams@intel.com>
>> Cc: David Hildenbrand <david@redhat.com>
>> Cc: osalvador <osalvador@suse.de>
>> ---
>>  mm/vmscan.c | 28 +++++++++++++++++++++++++---
>>  1 file changed, 25 insertions(+), 3 deletions(-)
>>
>> diff --git a/mm/vmscan.c b/mm/vmscan.c
>> index 7d5c7216a4b7..8654cec65522 100644
>> --- a/mm/vmscan.c
>> +++ b/mm/vmscan.c
>> @@ -2706,6 +2706,26 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
>>  	}
>>  }
>>
>> +/*
>> + * Anonymous LRU management is a waste if there is
>> + * ultimately no way to reclaim the memory.
>> + */
>> +bool anon_should_be_aged(struct lruvec *lruvec)
>> +{
>> +	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
>> +
>> +	/* Aging the anon LRU is valuable if swap is present: */
>> +	if (total_swap_pages > 0)
>> +		return true;
>> +
>> +	/* Also valuable if anon pages can be demoted: */
>> +	if (next_demotion_node(pgdat->node_id) >= 0)
>
> != NUMA_NO_NODE might be better, even though we know NUMA_NO_NODE
> is currently set to -1.

Sure. Will change this in the next version.

Best Regards,
Huang, Ying

>> +		return true;
>> +
>> +	/* No way to reclaim anon pages. Should not age anon LRUs: */
>> +	return false;
>> +}
>> +
>>  static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
>>  {
>>  	unsigned long nr[NR_LRU_LISTS];
>> @@ -2815,7 +2835,8 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
>>  	 * Even if we did not try to evict anon pages at all, we want to
>>  	 * rebalance the anon lru active/inactive ratio.
>>  	 */
>> -	if (total_swap_pages && inactive_is_low(lruvec, LRU_INACTIVE_ANON))
>> +	if (anon_should_be_aged(lruvec) &&
>> +	    inactive_is_low(lruvec, LRU_INACTIVE_ANON))
>>  		shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
>>  				   sc, LRU_ACTIVE_ANON);
>>  }
>> @@ -3644,10 +3665,11 @@ static void age_active_anon(struct pglist_data *pgdat,
>>  	struct mem_cgroup *memcg;
>>  	struct lruvec *lruvec;
>>
>> -	if (!total_swap_pages)
>> +	lruvec = mem_cgroup_lruvec(NULL, pgdat);
>> +
>> +	if (!anon_should_be_aged(lruvec))
>>  		return;
>>
>> -	lruvec = mem_cgroup_lruvec(NULL, pgdat);
>>  	if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON))
>>  		return;
>>
>> --
>> 2.30.2
>
> —
> Best Regards,
> Yan, Zi
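For reference, the agreed change amounts to something like the following hunk on top of this patch. This is a sketch of Zi Yan's suggestion, not the posted next version: `next_demotion_node()` returns `NUMA_NO_NODE` when there is no demotion target, so comparing against the named constant says what the code means rather than relying on `NUMA_NO_NODE` happening to be -1.

```diff
-	if (next_demotion_node(pgdat->node_id) >= 0)
+	if (next_demotion_node(pgdat->node_id) != NUMA_NO_NODE)
```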
```diff
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7d5c7216a4b7..8654cec65522 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2706,6 +2706,26 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 	}
 }
 
+/*
+ * Anonymous LRU management is a waste if there is
+ * ultimately no way to reclaim the memory.
+ */
+bool anon_should_be_aged(struct lruvec *lruvec)
+{
+	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+
+	/* Aging the anon LRU is valuable if swap is present: */
+	if (total_swap_pages > 0)
+		return true;
+
+	/* Also valuable if anon pages can be demoted: */
+	if (next_demotion_node(pgdat->node_id) >= 0)
+		return true;
+
+	/* No way to reclaim anon pages. Should not age anon LRUs: */
+	return false;
+}
+
 static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 {
 	unsigned long nr[NR_LRU_LISTS];
@@ -2815,7 +2835,8 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 	 * Even if we did not try to evict anon pages at all, we want to
 	 * rebalance the anon lru active/inactive ratio.
 	 */
-	if (total_swap_pages && inactive_is_low(lruvec, LRU_INACTIVE_ANON))
+	if (anon_should_be_aged(lruvec) &&
+	    inactive_is_low(lruvec, LRU_INACTIVE_ANON))
 		shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
 				   sc, LRU_ACTIVE_ANON);
 }
@@ -3644,10 +3665,11 @@ static void age_active_anon(struct pglist_data *pgdat,
 	struct mem_cgroup *memcg;
 	struct lruvec *lruvec;
 
-	if (!total_swap_pages)
+	lruvec = mem_cgroup_lruvec(NULL, pgdat);
+
+	if (!anon_should_be_aged(lruvec))
 		return;
 
-	lruvec = mem_cgroup_lruvec(NULL, pgdat);
 	if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON))
 		return;
```
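Taken out of the kernel context, the helper reduces to a simple disjunction: age anonymous LRUs only when either swap or a demotion target can consume the sorted lists. Below is a self-contained userspace sketch of that logic. The stubs `total_swap_pages` and `next_demotion_node()` here are stand-ins with made-up values, not the real kernel symbols, and the `!= NUMA_NO_NODE` comparison reflects the change agreed above.

```c
#include <stdbool.h>
#include <stdio.h>

#define NUMA_NO_NODE (-1)

/* Stand-in for kernel state; 0 models a system with no swap configured. */
static long total_swap_pages;

/* Stand-in demotion topology: pretend node 0 can demote to node 1. */
static int next_demotion_node(int node)
{
	return node == 0 ? 1 : NUMA_NO_NODE;
}

/* Mirrors the patch's logic: age anon LRUs only if reclaim can follow. */
static bool anon_should_be_aged(int node_id)
{
	/* Aging the anon LRU is valuable if swap is present: */
	if (total_swap_pages > 0)
		return true;

	/* Also valuable if anon pages can be demoted: */
	if (next_demotion_node(node_id) != NUMA_NO_NODE)
		return true;

	/* No way to reclaim anon pages; do not age anon LRUs: */
	return false;
}

int main(void)
{
	printf("node 0: %s\n", anon_should_be_aged(0) ? "age" : "skip"); /* age  */
	printf("node 1: %s\n", anon_should_be_aged(1) ? "age" : "skip"); /* skip */
	return 0;
}
```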