mm/list_lru: make the case where mlru is NULL as unlikely

Message ID 20250225153020.2514685-1-jingxiangzeng.cas@gmail.com (mailing list archive)
State New
Series mm/list_lru: make the case where mlru is NULL as unlikely

Commit Message

Jingxiang Zeng Feb. 25, 2025, 3:30 p.m. UTC
From: Zeng Jingxiang <linuszeng@tencent.com>

In the memcg_list_lru_alloc() function shown below, mlru is almost always
NULL by the time the final check runs, so keeping the check saves a kfree()
call in the common case; mark it as unlikely() to make that expectation
explicit to the compiler.
        do {
                xas_lock_irqsave(&xas, flags);
                if (!xas_load(&xas) && !css_is_dying(&pos->css)) {
                        xas_store(&xas, mlru);
                        if (!xas_error(&xas))
                                mlru = NULL;
                }
                xas_unlock_irqrestore(&xas, flags);
        } while (xas_nomem(&xas, GFP_KERNEL));
>       if (mlru)
                kfree(mlru);

Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202412290924.UTP7GH2Z-lkp@intel.com/
Signed-off-by: Zeng Jingxiang <linuszeng@tencent.com>
---
 mm/list_lru.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
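
For context on the hint being added: unlikely() wraps the compilers'
__builtin_expect() so the rarely taken branch can be laid out off the hot
path (the real macros live in include/linux/compiler.h and may also hook
branch profiling in some configurations). A minimal userspace sketch of the
same idea, with my_unlikely() and release() as illustrative stand-ins rather
than the kernel definitions:

#include <stdio.h>
#include <stdlib.h>

/* Same shape as the kernel's unlikely(): hint that x is usually false. */
#define my_unlikely(x)	__builtin_expect(!!(x), 0)

static void release(void *p)
{
	free(p);	/* stand-in for kfree(); only reached on the rare path */
}

int main(void)
{
	void *mlru = NULL;	/* almost always NULL at this point */

	if (my_unlikely(mlru))	/* compiler lays this branch out as cold */
		release(mlru);

	puts("hot path: nothing to free");
	return 0;
}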

Comments

Shakeel Butt Feb. 25, 2025, 4:23 p.m. UTC | #1
On Tue, Feb 25, 2025 at 11:30:20PM +0800, Jingxiang Zeng wrote:
> From: Zeng Jingxiang <linuszeng@tencent.com>
> 
> In the following memcg_list_lru_alloc() function, mlru here is almost
> always NULL, so in most cases this should save a function call, mark
> mlru as unlikely to optimize the code.
>         do {
>                 xas_lock_irqsave(&xas, flags);
>                 if (!xas_load(&xas) && !css_is_dying(&pos->css)) {
>                         xas_store(&xas, mlru);
>                         if (!xas_error(&xas))
>                                 mlru = NULL;
>                 }
>                 xas_unlock_irqrestore(&xas, flags);
>         } while (xas_nomem(&xas, GFP_KERNEL));
> >       if (mlru)
>                 kfree(mlru);
> 
> Reported-by: kernel test robot <lkp@intel.com>
> Closes: https://lore.kernel.org/oe-kbuild-all/202412290924.UTP7GH2Z-lkp@intel.com/
> Signed-off-by: Zeng Jingxiang <linuszeng@tencent.com>
> ---
>  mm/list_lru.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/mm/list_lru.c b/mm/list_lru.c
> index 064d2018e265..e7e13513ff8e 100644
> --- a/mm/list_lru.c
> +++ b/mm/list_lru.c
> @@ -552,7 +552,7 @@ static int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru)
>  			}
>  			xas_unlock_irqrestore(&xas, flags);
>  		} while (xas_nomem(&xas, GFP_KERNEL));
> -		if (mlru)
> +		if (unlikely(mlru))
>  			kfree(mlru);

The report is saying not to check at all. So, just remove the check and
simply call kfree(mlru) as it handles the NULL check efficiently.

>  		set_active_memcg(cur);
>  	} while (pos != memcg && !css_is_dying(&pos->css));
> -- 
> 2.43.5
> 
>
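
Shakeel's suggestion relies on the documented guarantee that kfree(NULL) is
a no-op, just like free(NULL) in userspace, which is why the robot's
coccinelle check flags the pattern as redundant. A small userspace analogy
(not the slab code) contrasting the two variants under discussion:

#include <stdlib.h>

/* Variant the robot suggests: rely on free()/kfree() accepting NULL. */
static void cleanup_unconditional(void *p)
{
	free(p);		/* no-op when p == NULL */
}

/* Variant kept by the patch: redundant for correctness, but it makes the
 * rare non-NULL (failure) path explicit. */
static void cleanup_checked(void *p)
{
	if (p)
		free(p);
}

int main(void)
{
	cleanup_unconditional(NULL);	/* safe */
	cleanup_checked(NULL);		/* also safe, just an extra branch */
	return 0;
}
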
Johannes Weiner Feb. 26, 2025, 1:11 a.m. UTC | #2
On Tue, Feb 25, 2025 at 08:23:12AM -0800, Shakeel Butt wrote:
> On Tue, Feb 25, 2025 at 11:30:20PM +0800, Jingxiang Zeng wrote:
> > From: Zeng Jingxiang <linuszeng@tencent.com>
> > 
> > In the following memcg_list_lru_alloc() function, mlru here is almost
> > always NULL, so in most cases this should save a function call, mark
> > mlru as unlikely to optimize the code.
> >         do {
> >                 xas_lock_irqsave(&xas, flags);
> >                 if (!xas_load(&xas) && !css_is_dying(&pos->css)) {
> >                         xas_store(&xas, mlru);
> >                         if (!xas_error(&xas))
> >                                 mlru = NULL;
> >                 }
> >                 xas_unlock_irqrestore(&xas, flags);
> >         } while (xas_nomem(&xas, GFP_KERNEL));
> > >       if (mlru)
> >                 kfree(mlru);
> > 
> > Reported-by: kernel test robot <lkp@intel.com>
> > Closes: https://lore.kernel.org/oe-kbuild-all/202412290924.UTP7GH2Z-lkp@intel.com/
> > Signed-off-by: Zeng Jingxiang <linuszeng@tencent.com>
> > ---
> >  mm/list_lru.c | 2 +-
> >  1 file changed, 1 insertion(+), 1 deletion(-)
> > 
> > diff --git a/mm/list_lru.c b/mm/list_lru.c
> > index 064d2018e265..e7e13513ff8e 100644
> > --- a/mm/list_lru.c
> > +++ b/mm/list_lru.c
> > @@ -552,7 +552,7 @@ static int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru)
> >  			}
> >  			xas_unlock_irqrestore(&xas, flags);
> >  		} while (xas_nomem(&xas, GFP_KERNEL));
> > -		if (mlru)
> > +		if (unlikely(mlru))
> >  			kfree(mlru);
> 
> The report is saying not to check at all. So, just remove the check and
> simply call kfree(mlru) as it handles the NULL check efficiently.

I actually like it in this case. It's an "active comment" that this
only happens in the failure case and we don't routinely free here.

That said, does it have to free the mlru inside the loop at all? If
the tree insertion fails, why not reuse it for the next attempt?

diff --git a/mm/list_lru.c b/mm/list_lru.c
index 7d69434c70e0..490473af3122 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -510,7 +510,7 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
 			 gfp_t gfp)
 {
 	unsigned long flags;
-	struct list_lru_memcg *mlru;
+	struct list_lru_memcg *mlru = NULL;
 	struct mem_cgroup *pos, *parent;
 	XA_STATE(xas, &lru->xa, 0);
 
@@ -535,9 +535,11 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
 			parent = parent_mem_cgroup(pos);
 		}
 
-		mlru = memcg_init_list_lru_one(lru, gfp);
-		if (!mlru)
-			return -ENOMEM;
+		if (!mlru) {
+			mlru = memcg_init_list_lru_one(lru, gfp);
+			if (!mlru)
+				return -ENOMEM;
+		}
 		xas_set(&xas, pos->kmemcg_id);
 		do {
 			xas_lock_irqsave(&xas, flags);
@@ -548,10 +550,11 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
 			}
 			xas_unlock_irqrestore(&xas, flags);
 		} while (xas_nomem(&xas, gfp));
-		if (mlru)
-			kfree(mlru);
 	} while (pos != memcg && !css_is_dying(&pos->css));
 
+	if (unlikely(mlru))
+		kfree(mlru);
+
 	return xas_error(&xas);
 }
 #else
Jingxiang Zeng Feb. 26, 2025, 2:09 a.m. UTC | #3
On Wed, 26 Feb 2025 at 09:12, Johannes Weiner <hannes@cmpxchg.org> wrote:
>
> On Tue, Feb 25, 2025 at 08:23:12AM -0800, Shakeel Butt wrote:
> > On Tue, Feb 25, 2025 at 11:30:20PM +0800, Jingxiang Zeng wrote:
> > > From: Zeng Jingxiang <linuszeng@tencent.com>
> > >
> > > In the following memcg_list_lru_alloc() function, mlru here is almost
> > > always NULL, so in most cases this should save a function call, mark
> > > mlru as unlikely to optimize the code.
> > >         do {
> > >                 xas_lock_irqsave(&xas, flags);
> > >                 if (!xas_load(&xas) && !css_is_dying(&pos->css)) {
> > >                         xas_store(&xas, mlru);
> > >                         if (!xas_error(&xas))
> > >                                 mlru = NULL;
> > >                 }
> > >                 xas_unlock_irqrestore(&xas, flags);
> > >         } while (xas_nomem(&xas, GFP_KERNEL));
> > > >       if (mlru)
> > >                 kfree(mlru);
> > >
> > > Reported-by: kernel test robot <lkp@intel.com>
> > > Closes: https://lore.kernel.org/oe-kbuild-all/202412290924.UTP7GH2Z-lkp@intel.com/
> > > Signed-off-by: Zeng Jingxiang <linuszeng@tencent.com>
> > > ---
> > >  mm/list_lru.c | 2 +-
> > >  1 file changed, 1 insertion(+), 1 deletion(-)
> > >
> > > diff --git a/mm/list_lru.c b/mm/list_lru.c
> > > index 064d2018e265..e7e13513ff8e 100644
> > > --- a/mm/list_lru.c
> > > +++ b/mm/list_lru.c
> > > @@ -552,7 +552,7 @@ static int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru)
> > >                     }
> > >                     xas_unlock_irqrestore(&xas, flags);
> > >             } while (xas_nomem(&xas, GFP_KERNEL));
> > > -           if (mlru)
> > > +           if (unlikely(mlru))
> > >                     kfree(mlru);
> >
> > The report is saying not to check at all. So, just remove the check and
> > simply call kfree(mlru) as it handles the NULL check efficiently.
>
> I actually like it in this case. It's an "active comment" that this
> only happens in the failure case and we don't routinely free here.
>
> That said, does it have to free the mlru inside the loop at all? If
> the tree insertion fails, why not reuse it for the next attempt?

I agree with this implementation; reusing the mlru for the next attempt
when the tree insertion fails is more appropriate.
>
> diff --git a/mm/list_lru.c b/mm/list_lru.c
> index 7d69434c70e0..490473af3122 100644
> --- a/mm/list_lru.c
> +++ b/mm/list_lru.c
> @@ -510,7 +510,7 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
>                          gfp_t gfp)
>  {
>         unsigned long flags;
> -       struct list_lru_memcg *mlru;
> +       struct list_lru_memcg *mlru = NULL;
>         struct mem_cgroup *pos, *parent;
>         XA_STATE(xas, &lru->xa, 0);
>
> @@ -535,9 +535,11 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
>                         parent = parent_mem_cgroup(pos);
>                 }
>
> -               mlru = memcg_init_list_lru_one(lru, gfp);
> -               if (!mlru)
> -                       return -ENOMEM;
> +               if (!mlru) {
> +                       mlru = memcg_init_list_lru_one(lru, gfp);
> +                       if (!mlru)
> +                               return -ENOMEM;
> +               }
>                 xas_set(&xas, pos->kmemcg_id);
>                 do {
>                         xas_lock_irqsave(&xas, flags);
> @@ -548,10 +550,11 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
>                         }
>                         xas_unlock_irqrestore(&xas, flags);
>                 } while (xas_nomem(&xas, gfp));
> -               if (mlru)
> -                       kfree(mlru);
>         } while (pos != memcg && !css_is_dying(&pos->css));
>
> +       if (unlikely(mlru))
> +               kfree(mlru);
> +
>         return xas_error(&xas);
>  }
>  #else
>
Shakeel Butt Feb. 26, 2025, 9:08 p.m. UTC | #4
On Tue, Feb 25, 2025 at 08:11:47PM -0500, Johannes Weiner wrote:
> On Tue, Feb 25, 2025 at 08:23:12AM -0800, Shakeel Butt wrote:
> > On Tue, Feb 25, 2025 at 11:30:20PM +0800, Jingxiang Zeng wrote:
> > > From: Zeng Jingxiang <linuszeng@tencent.com>
> > > 
> > > In the following memcg_list_lru_alloc() function, mlru here is almost
> > > always NULL, so in most cases this should save a function call, mark
> > > mlru as unlikely to optimize the code.
> > >         do {
> > >                 xas_lock_irqsave(&xas, flags);
> > >                 if (!xas_load(&xas) && !css_is_dying(&pos->css)) {
> > >                         xas_store(&xas, mlru);
> > >                         if (!xas_error(&xas))
> > >                                 mlru = NULL;
> > >                 }
> > >                 xas_unlock_irqrestore(&xas, flags);
> > >         } while (xas_nomem(&xas, GFP_KERNEL));
> > > >       if (mlru)
> > >                 kfree(mlru);
> > > 
> > > Reported-by: kernel test robot <lkp@intel.com>
> > > Closes: https://lore.kernel.org/oe-kbuild-all/202412290924.UTP7GH2Z-lkp@intel.com/
> > > Signed-off-by: Zeng Jingxiang <linuszeng@tencent.com>
> > > ---
> > >  mm/list_lru.c | 2 +-
> > >  1 file changed, 1 insertion(+), 1 deletion(-)
> > > 
> > > diff --git a/mm/list_lru.c b/mm/list_lru.c
> > > index 064d2018e265..e7e13513ff8e 100644
> > > --- a/mm/list_lru.c
> > > +++ b/mm/list_lru.c
> > > @@ -552,7 +552,7 @@ static int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru)
> > >  			}
> > >  			xas_unlock_irqrestore(&xas, flags);
> > >  		} while (xas_nomem(&xas, GFP_KERNEL));
> > > -		if (mlru)
> > > +		if (unlikely(mlru))
> > >  			kfree(mlru);
> > 
> > The report is saying not to check at all. So, just remove the check and
> > simply call kfree(mlru) as it handles the NULL check efficiently.
> 
> I actually like it in this case. It's an "active comment" that this
> only happens in the failure case and we don't routinely free here.
> 
> That said, does it have to free the mlru inside the loop at all? If
> the tree insertion fails, why not reuse it for the next attempt?
> 
> diff --git a/mm/list_lru.c b/mm/list_lru.c
> index 7d69434c70e0..490473af3122 100644
> --- a/mm/list_lru.c
> +++ b/mm/list_lru.c
> @@ -510,7 +510,7 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
>  			 gfp_t gfp)
>  {
>  	unsigned long flags;
> -	struct list_lru_memcg *mlru;
> +	struct list_lru_memcg *mlru = NULL;
>  	struct mem_cgroup *pos, *parent;
>  	XA_STATE(xas, &lru->xa, 0);
>  
> @@ -535,9 +535,11 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
>  			parent = parent_mem_cgroup(pos);
>  		}
>  
> -		mlru = memcg_init_list_lru_one(lru, gfp);
> -		if (!mlru)
> -			return -ENOMEM;
> +		if (!mlru) {
> +			mlru = memcg_init_list_lru_one(lru, gfp);
> +			if (!mlru)
> +				return -ENOMEM;
> +		}
>  		xas_set(&xas, pos->kmemcg_id);
>  		do {
>  			xas_lock_irqsave(&xas, flags);
> @@ -548,10 +550,11 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
>  			}
>  			xas_unlock_irqrestore(&xas, flags);
>  		} while (xas_nomem(&xas, gfp));
> -		if (mlru)
> -			kfree(mlru);
>  	} while (pos != memcg && !css_is_dying(&pos->css));
>  
> +	if (unlikely(mlru))
> +		kfree(mlru);

Yup this looks good. Will unlikely() shut up the warning from the bot?
Jingxiang Zeng Feb. 27, 2025, 8:03 a.m. UTC | #5
On Thu, 27 Feb 2025 at 05:08, Shakeel Butt <shakeel.butt@linux.dev> wrote:
>
> On Tue, Feb 25, 2025 at 08:11:47PM -0500, Johannes Weiner wrote:
> > On Tue, Feb 25, 2025 at 08:23:12AM -0800, Shakeel Butt wrote:
> > > On Tue, Feb 25, 2025 at 11:30:20PM +0800, Jingxiang Zeng wrote:
> > > > From: Zeng Jingxiang <linuszeng@tencent.com>
> > > >
> > > > In the following memcg_list_lru_alloc() function, mlru here is almost
> > > > always NULL, so in most cases this should save a function call, mark
> > > > mlru as unlikely to optimize the code.
> > > >         do {
> > > >                 xas_lock_irqsave(&xas, flags);
> > > >                 if (!xas_load(&xas) && !css_is_dying(&pos->css)) {
> > > >                         xas_store(&xas, mlru);
> > > >                         if (!xas_error(&xas))
> > > >                                 mlru = NULL;
> > > >                 }
> > > >                 xas_unlock_irqrestore(&xas, flags);
> > > >         } while (xas_nomem(&xas, GFP_KERNEL));
> > > > >       if (mlru)
> > > >                 kfree(mlru);
> > > >
> > > > Reported-by: kernel test robot <lkp@intel.com>
> > > > Closes: https://lore.kernel.org/oe-kbuild-all/202412290924.UTP7GH2Z-lkp@intel.com/
> > > > Signed-off-by: Zeng Jingxiang <linuszeng@tencent.com>
> > > > ---
> > > >  mm/list_lru.c | 2 +-
> > > >  1 file changed, 1 insertion(+), 1 deletion(-)
> > > >
> > > > diff --git a/mm/list_lru.c b/mm/list_lru.c
> > > > index 064d2018e265..e7e13513ff8e 100644
> > > > --- a/mm/list_lru.c
> > > > +++ b/mm/list_lru.c
> > > > @@ -552,7 +552,7 @@ static int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru)
> > > >                   }
> > > >                   xas_unlock_irqrestore(&xas, flags);
> > > >           } while (xas_nomem(&xas, GFP_KERNEL));
> > > > -         if (mlru)
> > > > +         if (unlikely(mlru))
> > > >                   kfree(mlru);
> > >
> > > The report is saying not to check at all. So, just remove the check and
> > > simply call kfree(mlru) as it handles the NULL check efficiently.
> >
> > I actually like it in this case. It's an "active comment" that this
> > only happens in the failure case and we don't routinely free here.
> >
> > That said, does it have to free the mlru inside the loop at all? If
> > the tree insertion fails, why not reuse it for the next attempt?
> >
> > diff --git a/mm/list_lru.c b/mm/list_lru.c
> > index 7d69434c70e0..490473af3122 100644
> > --- a/mm/list_lru.c
> > +++ b/mm/list_lru.c
> > @@ -510,7 +510,7 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
> >                        gfp_t gfp)
> >  {
> >       unsigned long flags;
> > -     struct list_lru_memcg *mlru;
> > +     struct list_lru_memcg *mlru = NULL;
> >       struct mem_cgroup *pos, *parent;
> >       XA_STATE(xas, &lru->xa, 0);
> >
> > @@ -535,9 +535,11 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
> >                       parent = parent_mem_cgroup(pos);
> >               }
> >
> > -             mlru = memcg_init_list_lru_one(lru, gfp);
> > -             if (!mlru)
> > -                     return -ENOMEM;
> > +             if (!mlru) {
> > +                     mlru = memcg_init_list_lru_one(lru, gfp);
> > +                     if (!mlru)
> > +                             return -ENOMEM;
> > +             }
> >               xas_set(&xas, pos->kmemcg_id);
> >               do {
> >                       xas_lock_irqsave(&xas, flags);
> > @@ -548,10 +550,11 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
> >                       }
> >                       xas_unlock_irqrestore(&xas, flags);
> >               } while (xas_nomem(&xas, gfp));
> > -             if (mlru)
> > -                     kfree(mlru);
> >       } while (pos != memcg && !css_is_dying(&pos->css));
> >
> > +     if (unlikely(mlru))
> > +             kfree(mlru);
>
> Yup this looks good. Will unlikely() shutup the warning from bot?
>
I verified it locally by running the COCCI check; it no longer reports the
NULL check warning.

Patch

diff --git a/mm/list_lru.c b/mm/list_lru.c
index 064d2018e265..e7e13513ff8e 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -552,7 +552,7 @@ static int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru)
 			}
 			xas_unlock_irqrestore(&xas, flags);
 		} while (xas_nomem(&xas, GFP_KERNEL));
-		if (mlru)
+		if (unlikely(mlru))
 			kfree(mlru);
 		set_active_memcg(cur);
 	} while (pos != memcg && !css_is_dying(&pos->css));