diff mbox series

[RFC,3/5] mm/memcontrol: do not scan anon pages if memsw limit is hit

Message ID 20250319064148.774406-4-jingxiangzeng.cas@gmail.com (mailing list archive)
State New
Headers show
Series add option to restore swap account to cgroupv1 mode | expand

Commit Message

jingxiang zeng March 19, 2025, 6:41 a.m. UTC
From: Zeng Jingxiang <linuszeng@tencent.com>

When memory reclaim is triggered by hitting the memsw hard limit
(memory.memsw.limit_in_bytes), anonymous pages should not be reclaimed
any further: swapping them out cannot reduce the combined memory+swap
charge. This is consistent with how cgroup v2 handles its swap limit.

Signed-off-by: Zeng Jingxiang <linuszeng@tencent.com>
---
 mm/memcontrol.c | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

Comments

Shakeel Butt March 19, 2025, 7:36 p.m. UTC | #1
On Wed, Mar 19, 2025 at 02:41:46PM +0800, Jingxiang Zeng wrote:
> From: Zeng Jingxiang <linuszeng@tencent.com>
> 
> When memory recycling is triggered by the hard watermark of

What is hard watermark?

> memsw, anonymous pages do not want to be recycled any further.
> This is consistent with the processing method of cgroup v2.
> 
> Signed-off-by: Zeng Jingxiang <linuszeng@tencent.com>

Is this patch orthogonal to the series or is it needed for v1 as well?

> ---
>  mm/memcontrol.c | 15 +++++++++++----
>  1 file changed, 11 insertions(+), 4 deletions(-)
> 
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index c1171fb2bfd6..623ebf610946 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -5072,14 +5072,21 @@ void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
>  
>  long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
>  {
> +	struct page_counter *pg_counter;
>  	long nr_swap_pages = get_nr_swap_pages();
>  
> -	if (mem_cgroup_disabled() || do_memsw_account())
> +	if (mem_cgroup_disabled())
>  		return nr_swap_pages;
> -	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
> +	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
> +		if (do_memsw_account())
> +			pg_counter = &memcg->memsw;
> +		else
> +			pg_counter = &memcg->swap;
> +
>  		nr_swap_pages = min_t(long, nr_swap_pages,
> -				      READ_ONCE(memcg->swap.max) -
> -				      page_counter_read(&memcg->swap));
> +				      READ_ONCE(pg_counter->max) -
> +				      page_counter_read(pg_counter));
> +	}
>  	return nr_swap_pages;
>  }
>  
> -- 
> 2.41.1
>
jingxiang zeng March 20, 2025, 8:40 a.m. UTC | #2
On Thu, 20 Mar 2025 at 03:36, Shakeel Butt <shakeel.butt@linux.dev> wrote:
>
> On Wed, Mar 19, 2025 at 02:41:46PM +0800, Jingxiang Zeng wrote:
> > From: Zeng Jingxiang <linuszeng@tencent.com>
> >
> > When memory recycling is triggered by the hard watermark of
>
> What is hard watermark?

memory.memsw.limit_in_bytes.
>
> > memsw, anonymous pages do not want to be recycled any further.
> > This is consistent with the processing method of cgroup v2.
> >
> > Signed-off-by: Zeng Jingxiang <linuszeng@tencent.com>
>
> Is this patch orthogonal to the series or is it needed for v1 as well?

Yes, it is needed for cgroup v1 as well.
>
> > ---
> >  mm/memcontrol.c | 15 +++++++++++----
> >  1 file changed, 11 insertions(+), 4 deletions(-)
> >
> > diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> > index c1171fb2bfd6..623ebf610946 100644
> > --- a/mm/memcontrol.c
> > +++ b/mm/memcontrol.c
> > @@ -5072,14 +5072,21 @@ void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
> >
> >  long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
> >  {
> > +     struct page_counter *pg_counter;
> >       long nr_swap_pages = get_nr_swap_pages();
> >
> > -     if (mem_cgroup_disabled() || do_memsw_account())
> > +     if (mem_cgroup_disabled())
> >               return nr_swap_pages;
> > -     for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
> > +     for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
> > +             if (do_memsw_account())
> > +                     pg_counter = &memcg->memsw;
> > +             else
> > +                     pg_counter = &memcg->swap;
> > +
> >               nr_swap_pages = min_t(long, nr_swap_pages,
> > -                                   READ_ONCE(memcg->swap.max) -
> > -                                   page_counter_read(&memcg->swap));
> > +                                   READ_ONCE(pg_counter->max) -
> > +                                   page_counter_read(pg_counter));
> > +     }
> >       return nr_swap_pages;
> >  }
> >
> > --
> > 2.41.1
> >
>
diff mbox series

Patch

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c1171fb2bfd6..623ebf610946 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5072,14 +5072,21 @@  void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
 
 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
 {
+	struct page_counter *pg_counter;
 	long nr_swap_pages = get_nr_swap_pages();
 
-	if (mem_cgroup_disabled() || do_memsw_account())
+	if (mem_cgroup_disabled())
 		return nr_swap_pages;
-	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
+	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
+		if (do_memsw_account())
+			pg_counter = &memcg->memsw;
+		else
+			pg_counter = &memcg->swap;
+
 		nr_swap_pages = min_t(long, nr_swap_pages,
-				      READ_ONCE(memcg->swap.max) -
-				      page_counter_read(&memcg->swap));
+				      READ_ONCE(pg_counter->max) -
+				      page_counter_read(pg_counter));
+	}
 	return nr_swap_pages;
 }