diff mbox series

[V1] mm/list_lru: make the case where mlru is NULL as unlikely

Message ID 20250227082223.1173847-1-jingxiangzeng.cas@gmail.com (mailing list archive)
State New
Headers show
Series [V1] mm/list_lru: make the case where mlru is NULL as unlikely | expand

Commit Message

Jingxiang Zeng Feb. 27, 2025, 8:22 a.m. UTC
From: Zeng Jingxiang <linuszeng@tencent.com>

In the following memcg_list_lru_alloc() function, mlru here is almost
always NULL, so in most cases this should save a function call. Mark
mlru as unlikely to optimize the code, and reuse the mlru for the
next attempt when the tree insertion fails.
        do {
                xas_lock_irqsave(&xas, flags);
                if (!xas_load(&xas) && !css_is_dying(&pos->css)) {
                        xas_store(&xas, mlru);
                        if (!xas_error(&xas))
                                mlru = NULL;
                }
                xas_unlock_irqrestore(&xas, flags);
        } while (xas_nomem(&xas, GFP_KERNEL));
        if (mlru)
                kfree(mlru);

Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202412290924.UTP7GH2Z-lkp@intel.com/
Signed-off-by: Zeng Jingxiang <linuszeng@tencent.com>
Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
---
 mm/list_lru.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

Comments

Muchun Song Feb. 27, 2025, 12:19 p.m. UTC | #1
> On Feb 27, 2025, at 16:22, Jingxiang Zeng <jingxiangzeng.cas@gmail.com> wrote:
> 
> From: Zeng Jingxiang <linuszeng@tencent.com>
> 
> In the following memcg_list_lru_alloc() function, mlru here is almost
> always NULL, so in most cases this should save a function call, mark
> mlru as unlikely to optimize the code, and reusing the mlru for the
> next attempt when the tree insertion fails.
>        do {
>                xas_lock_irqsave(&xas, flags);
>                if (!xas_load(&xas) && !css_is_dying(&pos->css)) {
>                        xas_store(&xas, mlru);
>                        if (!xas_error(&xas))
>                                mlru = NULL;
>                }
>                xas_unlock_irqrestore(&xas, flags);
>        } while (xas_nomem(&xas, GFP_KERNEL));
>>      if (mlru)
>                kfree(mlru);
> 
> Reported-by: kernel test robot <lkp@intel.com>
> Closes: https://lore.kernel.org/oe-kbuild-all/202412290924.UTP7GH2Z-lkp@intel.com/
> Signed-off-by: Zeng Jingxiang <linuszeng@tencent.com>
> Suggested-by: Johannes Weiner <hannes@cmpxchg.org>

Reviewed-by: Muchun Song <muchun.song@linux.dev>

Thanks.
Johannes Weiner Feb. 27, 2025, 2:42 p.m. UTC | #2
On Thu, Feb 27, 2025 at 04:22:23PM +0800, Jingxiang Zeng wrote:
> From: Zeng Jingxiang <linuszeng@tencent.com>
> 
> In the following memcg_list_lru_alloc() function, mlru here is almost
> always NULL, so in most cases this should save a function call, mark
> mlru as unlikely to optimize the code, and reusing the mlru for the
> next attempt when the tree insertion fails.
>         do {
>                 xas_lock_irqsave(&xas, flags);
>                 if (!xas_load(&xas) && !css_is_dying(&pos->css)) {
>                         xas_store(&xas, mlru);
>                         if (!xas_error(&xas))
>                                 mlru = NULL;
>                 }
>                 xas_unlock_irqrestore(&xas, flags);
>         } while (xas_nomem(&xas, GFP_KERNEL));
> >       if (mlru)
>                 kfree(mlru);
> 
> Reported-by: kernel test robot <lkp@intel.com>
> Closes: https://lore.kernel.org/oe-kbuild-all/202412290924.UTP7GH2Z-lkp@intel.com/
> Signed-off-by: Zeng Jingxiang <linuszeng@tencent.com>
> Suggested-by: Johannes Weiner <hannes@cmpxchg.org>

Acked-by: Johannes Weiner <hannes@cmpxchg.org>
diff mbox series

Patch

diff --git a/mm/list_lru.c b/mm/list_lru.c
index 7d69434c70e0..490473af3122 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -510,7 +510,7 @@  int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
 			 gfp_t gfp)
 {
 	unsigned long flags;
-	struct list_lru_memcg *mlru;
+	struct list_lru_memcg *mlru = NULL;
 	struct mem_cgroup *pos, *parent;
 	XA_STATE(xas, &lru->xa, 0);
 
@@ -535,9 +535,11 @@  int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
 			parent = parent_mem_cgroup(pos);
 		}
 
-		mlru = memcg_init_list_lru_one(lru, gfp);
-		if (!mlru)
-			return -ENOMEM;
+		if (!mlru) {
+			mlru = memcg_init_list_lru_one(lru, gfp);
+			if (!mlru)
+				return -ENOMEM;
+		}
 		xas_set(&xas, pos->kmemcg_id);
 		do {
 			xas_lock_irqsave(&xas, flags);
@@ -548,10 +550,11 @@  int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
 			}
 			xas_unlock_irqrestore(&xas, flags);
 		} while (xas_nomem(&xas, gfp));
-		if (mlru)
-			kfree(mlru);
 	} while (pos != memcg && !css_is_dying(&pos->css));
 
+	if (unlikely(mlru))
+		kfree(mlru);
+
 	return xas_error(&xas);
 }
 #else