Message ID | 20230829024104.1505530-1-liushixin2@huawei.com (mailing list archive)
---|---
State | New
Series | [v4] mm: vmscan: try to reclaim swapcache pages if no swap space
Liu Shixin <liushixin2@huawei.com> writes:

> When swap space is exhausted, only file pages can be reclaimed. But there
> are still some swapcache pages on the anon LRU list. This can lead to a
> premature out-of-memory.
>
> The problem can be reproduced with the following steps:
>
> First, set up a 9MB disk swap space, then create a cgroup with a 10MB
> memory limit, then run a program that allocates about 15MB of memory.
>
> The problem occurs occasionally and may take about 100 runs to trigger.
>
> Fix it by checking the number of swapcache pages in can_reclaim_anon_pages().
> If the number is not zero, return true as well. Moreover, add a new bit
> swapcache_only in struct scan_control to skip isolating anon pages that are
> not in the swapcache when only swapcache pages can be reclaimed, to improve
> reclaim efficiency.
>
> Link: https://lore.kernel.org/linux-mm/14e15f31-f3d3-4169-8ed9-fb36e57cf578@huawei.com/
> Signed-off-by: Liu Shixin <liushixin2@huawei.com>
> Tested-by: Yosry Ahmed <yosryahmed@google.com>

LGTM, Thanks!

Reviewed-by: "Huang, Ying" <ying.huang@intel.com>

[snip quoted patch; the full diff is shown below]
On Mon, Aug 28, 2023 at 6:46 PM Liu Shixin <liushixin2@huawei.com> wrote:
>
> When swap space is exhausted, only file pages can be reclaimed. But there
> are still some swapcache pages on the anon LRU list. This can lead to a
> premature out-of-memory.
>
> The problem can be reproduced with the following steps:
>
> First, set up a 9MB disk swap space, then create a cgroup with a 10MB
> memory limit, then run a program that allocates about 15MB of memory.
>
> The problem occurs occasionally and may take about 100 runs to trigger.

The reproducer I used in v2 reproduces this very reliably and simply,
could you link to it instead?

https://lore.kernel.org/lkml/CAJD7tkZAfgncV+KbKr36=eDzMnT=9dZOT0dpMWcurHLr6Do+GA@mail.gmail.com/

> Fix it by checking the number of swapcache pages in can_reclaim_anon_pages().
> If the number is not zero, return true as well. Moreover, add a new bit
> swapcache_only in struct scan_control to skip isolating anon pages that are
> not in the swapcache when only swapcache pages can be reclaimed, to improve
> reclaim efficiency.
>
> Link: https://lore.kernel.org/linux-mm/14e15f31-f3d3-4169-8ed9-fb36e57cf578@huawei.com/
> Signed-off-by: Liu Shixin <liushixin2@huawei.com>
> Tested-by: Yosry Ahmed <yosryahmed@google.com>

Usually people include the differences from the previous version to make
it easy to see what changed.

I still prefer to add NR_SWAPCACHE to memcg1_stats. Anyway, the code
looks good to me. With the updated reproducer link feel free to add:

Reviewed-by: Yosry Ahmed <yosryahmed@google.com>

[snip quoted patch; the full diff is shown below]
On 2023/8/29 23:23, Yosry Ahmed wrote:
> On Mon, Aug 28, 2023 at 6:46 PM Liu Shixin <liushixin2@huawei.com> wrote:
>> When swap space is exhausted, only file pages can be reclaimed. But there
>> are still some swapcache pages on the anon LRU list. This can lead to a
>> premature out-of-memory.
>>
>> The problem can be reproduced with the following steps:
>>
>> First, set up a 9MB disk swap space, then create a cgroup with a 10MB
>> memory limit, then run a program that allocates about 15MB of memory.
>>
>> The problem occurs occasionally and may take about 100 runs to trigger.
> The reproducer I used in v2 reproduces this very reliably and simply,
> could you link to it instead?
>
> https://lore.kernel.org/lkml/CAJD7tkZAfgncV+KbKr36=eDzMnT=9dZOT0dpMWcurHLr6Do+GA@mail.gmail.com/

OK

>> Fix it by checking the number of swapcache pages in can_reclaim_anon_pages().
>> If the number is not zero, return true as well. Moreover, add a new bit
>> swapcache_only in struct scan_control to skip isolating anon pages that are
>> not in the swapcache when only swapcache pages can be reclaimed, to improve
>> reclaim efficiency.
>>
>> Link: https://lore.kernel.org/linux-mm/14e15f31-f3d3-4169-8ed9-fb36e57cf578@huawei.com/
>> Signed-off-by: Liu Shixin <liushixin2@huawei.com>
>> Tested-by: Yosry Ahmed <yosryahmed@google.com>
> Usually people include the differences from the previous version to make
> it easy to see what changed.
>
> I still prefer to add NR_SWAPCACHE to memcg1_stats. Anyway, the code
> looks good to me. With the updated reproducer link feel free to add:
>
> Reviewed-by: Yosry Ahmed <yosryahmed@google.com>

Thanks,

[snip quoted patch; the full diff is shown below]
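The reproducer Yosry links above is the authoritative one and is not copied
here. As a rough, hypothetical sketch of the scenario described in the commit
message (a cgroup limited to 10MB of memory, about 9MB of swap, and a task
that allocates roughly 15MB), the allocator side could look like the program
below. The cgroup and swap setup are assumed to be done outside the program,
and the sizes are taken from the commit message, not from the linked
reproducer.

/*
 * Illustration only: assumes the process has already been moved into a
 * memcg with a 10MB memory limit and that a ~9MB swap device is enabled.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define ALLOC_SIZE (15UL << 20)	/* ~15MB of anonymous memory */

int main(void)
{
	char *buf = malloc(ALLOC_SIZE);

	if (!buf) {
		perror("malloc");
		return 1;
	}

	/* Touch every byte so the pages are actually charged to the memcg. */
	memset(buf, 0x5a, ALLOC_SIZE);

	/* Keep the pages resident long enough for reclaim to run. */
	pause();
	return 0;
}

Run inside the limited cgroup: once swap fills up, without the fix reclaim
cannot make progress on the remaining swapcache pages on the anon LRU and
the task may be OOM-killed prematurely.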
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 456546443f1f..0318e918bfa4 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -669,6 +669,7 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_p
 }
 
 extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
+extern long mem_cgroup_get_nr_swapcache_pages(struct mem_cgroup *memcg);
 extern bool mem_cgroup_swap_full(struct folio *folio);
 #else
 static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
@@ -691,6 +692,11 @@ static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
 	return get_nr_swap_pages();
 }
 
+static inline long mem_cgroup_get_nr_swapcache_pages(struct mem_cgroup *memcg)
+{
+	return total_swapcache_pages();
+}
+
 static inline bool mem_cgroup_swap_full(struct folio *folio)
 {
 	return vm_swap_full();
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e8ca4bdcb03c..c465829db92b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -7567,6 +7567,14 @@ long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
 	return nr_swap_pages;
 }
 
+long mem_cgroup_get_nr_swapcache_pages(struct mem_cgroup *memcg)
+{
+	if (mem_cgroup_disabled())
+		return total_swapcache_pages();
+
+	return memcg_page_state(memcg, NR_SWAPCACHE);
+}
+
 bool mem_cgroup_swap_full(struct folio *folio)
 {
 	struct mem_cgroup *memcg;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1080209a568b..e73e2df8828d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -137,6 +137,9 @@ struct scan_control {
 	/* Always discard instead of demoting to lower tier memory */
 	unsigned int no_demotion:1;
 
+	/* Swap space is exhausted, only reclaim swapcache for anon LRU */
+	unsigned int swapcache_only:1;
+
 	/* Allocation order */
 	s8 order;
 
@@ -613,10 +616,20 @@ static inline bool can_reclaim_anon_pages(struct mem_cgroup *memcg,
 		 */
 		if (get_nr_swap_pages() > 0)
 			return true;
+		/* Is there any swapcache pages to reclaim? */
+		if (total_swapcache_pages() > 0) {
+			sc->swapcache_only = 1;
+			return true;
+		}
 	} else {
 		/* Is the memcg below its swap limit? */
 		if (mem_cgroup_get_nr_swap_pages(memcg) > 0)
 			return true;
+		/* Is there any swapcache pages in memcg to reclaim? */
+		if (mem_cgroup_get_nr_swapcache_pages(memcg) > 0) {
+			sc->swapcache_only = 1;
+			return true;
+		}
 	}
 
 	/*
@@ -2280,6 +2293,19 @@ static bool skip_cma(struct folio *folio, struct scan_control *sc)
 }
 #endif
 
+static bool skip_isolate(struct folio *folio, struct scan_control *sc,
+			 enum lru_list lru)
+{
+	if (folio_zonenum(folio) > sc->reclaim_idx)
+		return true;
+	if (skip_cma(folio, sc))
+		return true;
+	if (unlikely(sc->swapcache_only && !is_file_lru(lru) &&
+		     !folio_test_swapcache(folio)))
+		return true;
+	return false;
+}
+
 /*
  * Isolating page from the lruvec to fill in @dst list by nr_to_scan times.
  *
@@ -2326,8 +2352,7 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
 		nr_pages = folio_nr_pages(folio);
 		total_scan += nr_pages;
 
-		if (folio_zonenum(folio) > sc->reclaim_idx ||
-		    skip_cma(folio, sc)) {
+		if (skip_isolate(folio, sc, lru)) {
 			nr_skipped[folio_zonenum(folio)] += nr_pages;
 			move_to = &folios_skipped;
 			goto move;
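As a side note on observing the condition this patch targets (swap space
exhausted while swapcache pages remain on the anon LRU), one rough way from
userspace is to compare SwapFree and SwapCached in /proc/meminfo while the
workload runs. The snippet below is only an illustrative sketch; it reads
the standard /proc/meminfo fields with deliberately simple parsing.

/* Print SwapFree and SwapCached from /proc/meminfo; the case relevant to
 * this patch is SwapFree == 0 while SwapCached > 0. */
#include <stdio.h>

int main(void)
{
	char line[256];
	unsigned long swap_free = 0, swap_cached = 0;
	FILE *fp = fopen("/proc/meminfo", "r");

	if (!fp) {
		perror("/proc/meminfo");
		return 1;
	}

	while (fgets(line, sizeof(line), fp)) {
		/* sscanf leaves the value untouched when the prefix does not match */
		sscanf(line, "SwapFree: %lu kB", &swap_free);
		sscanf(line, "SwapCached: %lu kB", &swap_cached);
	}
	fclose(fp);

	printf("SwapFree: %lu kB, SwapCached: %lu kB\n", swap_free, swap_cached);
	if (swap_free == 0 && swap_cached > 0)
		printf("swap exhausted but swapcache pages remain\n");
	return 0;
}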