
[v2,2/2] mm: memcg: use READ_ONCE()/WRITE_ONCE() to access stock->cached

Message ID 20230502160839.361544-2-roman.gushchin@linux.dev (mailing list archive)
State New
Series [v2,1/2] mm: kmem: fix a NULL pointer dereference in obj_stock_flush_required()

Commit Message

Roman Gushchin May 2, 2023, 4:08 p.m. UTC
A memcg pointer in the percpu stock can be accessed by drain_all_stock()
from another cpu in a lockless way.
In theory it might lead to an issue similar to the one which has been
discovered with stock->cached_objcg, where the pointer was zeroed
between the check for NULL and the dereference.
In this case the issue is unlikely to be a real problem, but to make it
bulletproof and consistent with stock->cached_objcg, let's annotate all
accesses to stock->cached with READ_ONCE()/WRITE_ONCE().

Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Cc: Shakeel Butt <shakeelb@google.com>
---
 mm/memcontrol.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
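
The pattern being hardened can be pictured with a small sketch (simplified and
condensed; this is not the exact mm/memcontrol.c code, and stock_needs_flush()
and stock_reset() are made-up helper names used only for illustration):

/* Remote, lockless reader (as in drain_all_stock()): load the pointer
 * exactly once so a concurrent reset cannot turn a successful non-NULL
 * check into a NULL dereference a few lines later. */
static bool stock_needs_flush(struct memcg_stock_pcp *stock,
			      struct mem_cgroup *root_memcg)
{
	struct mem_cgroup *memcg = READ_ONCE(stock->cached);

	return memcg && stock->nr_pages &&
	       mem_cgroup_is_descendant(memcg, root_memcg);
}

/* Local writer (as in drain_stock()): pairs with the READ_ONCE() above
 * so the compiler cannot tear or duplicate the store. */
static void stock_reset(struct memcg_stock_pcp *stock)
{
	WRITE_ONCE(stock->cached, NULL);
}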

Comments

Yosry Ahmed May 2, 2023, 8:21 p.m. UTC | #1
On Tue, May 2, 2023 at 9:09 AM Roman Gushchin <roman.gushchin@linux.dev> wrote:
>
> A memcg pointer in the percpu stock can be accessed by drain_all_stock()
> from another cpu in a lockless way.
> In theory it might lead to an issue similar to the one which has been
> discovered with stock->cached_objcg, where the pointer was zeroed
> between the check for NULL and the dereference.
> In this case the issue is unlikely to be a real problem, but to make it
> bulletproof and consistent with stock->cached_objcg, let's annotate all
> accesses to stock->cached with READ_ONCE()/WRITE_ONCE().

Is it time to rename that to cached_memcg? :)

Anyway, same comment as patch 1 about annotating all reads with
READ_ONCE() vs. singling out the racy read.
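
A minimal sketch of the two options being weighed, for reference (illustrative
only; stock_matches() and stock_peek_remote() are hypothetical helpers, not
functions from the tree):

/* Option 1 (this patch): annotate every access uniformly, including the
 * ones already serialized by the local stock_lock. */
static bool stock_matches(struct memcg_stock_pcp *stock,
			  struct mem_cgroup *memcg)
{
	return READ_ONCE(stock->cached) == memcg;
}

/* Option 2: annotate only the cross-cpu read in drain_all_stock() and
 * leave the lock-protected accesses as plain loads. */
static struct mem_cgroup *stock_peek_remote(struct memcg_stock_pcp *stock)
{
	return READ_ONCE(stock->cached);	/* lockless, may race */
}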

>
> Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
> Cc: Dmitry Vyukov <dvyukov@google.com>
> Cc: Yosry Ahmed <yosryahmed@google.com>
> Cc: Shakeel Butt <shakeelb@google.com>
> ---
>  mm/memcontrol.c | 12 ++++++------
>  1 file changed, 6 insertions(+), 6 deletions(-)
>
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index c823c35c2ed4..1e364ad495a3 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -2275,7 +2275,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
>         local_lock_irqsave(&memcg_stock.stock_lock, flags);
>
>         stock = this_cpu_ptr(&memcg_stock);
> -       if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
> +       if (memcg == READ_ONCE(stock->cached) && stock->nr_pages >= nr_pages) {
>                 stock->nr_pages -= nr_pages;
>                 ret = true;
>         }
> @@ -2290,7 +2290,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
>   */
>  static void drain_stock(struct memcg_stock_pcp *stock)
>  {
> -       struct mem_cgroup *old = stock->cached;
> +       struct mem_cgroup *old = READ_ONCE(stock->cached);
>
>         if (!old)
>                 return;
> @@ -2303,7 +2303,7 @@ static void drain_stock(struct memcg_stock_pcp *stock)
>         }
>
>         css_put(&old->css);
> -       stock->cached = NULL;
> +       WRITE_ONCE(stock->cached, NULL);

Is it just me, or can drain_stock() be called from memcg_hotplug_cpu_dead()
without holding the lock, unlike all other callers? Is this a problem?
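
For context, the hotplug path in question looks roughly like this (paraphrased,
not quoted from the tree; the exact body may differ):

static int memcg_hotplug_cpu_dead(unsigned int cpu)
{
	struct memcg_stock_pcp *stock;

	/* Drains the dead cpu's stock without taking that cpu's
	 * memcg_stock.stock_lock, unlike the other drain_stock() callers. */
	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);

	return 0;
}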

>  }
>
>  static void drain_local_stock(struct work_struct *dummy)
> @@ -2338,10 +2338,10 @@ static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
>         struct memcg_stock_pcp *stock;
>
>         stock = this_cpu_ptr(&memcg_stock);
> -       if (stock->cached != memcg) { /* reset if necessary */
> +       if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
>                 drain_stock(stock);
>                 css_get(&memcg->css);
> -               stock->cached = memcg;
> +               WRITE_ONCE(stock->cached, memcg);
>         }
>         stock->nr_pages += nr_pages;
>
> @@ -2383,7 +2383,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
>                 bool flush = false;
>
>                 rcu_read_lock();
> -               memcg = stock->cached;
> +               memcg = READ_ONCE(stock->cached);
>                 if (memcg && stock->nr_pages &&
>                     mem_cgroup_is_descendant(memcg, root_memcg))
>                         flush = true;
> --
> 2.40.1
>
Shakeel Butt May 3, 2023, 5:06 p.m. UTC | #2
On Tue, May 02, 2023 at 09:08:39AM -0700, Roman Gushchin wrote:
> A memcg pointer in the percpu stock can be accessed by drain_all_stock()
> from another cpu in a lockless way.
> In theory it might lead to an issue similar to the one which has been
> discovered with stock->cached_objcg, where the pointer was zeroed
> between the check for NULL and the dereference.
> In this case the issue is unlikely to be a real problem, but to make it
> bulletproof and consistent with stock->cached_objcg, let's annotate all
> accesses to stock->cached with READ_ONCE()/WRITE_ONCE().
> 
> Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
> Cc: Dmitry Vyukov <dvyukov@google.com>
> Cc: Yosry Ahmed <yosryahmed@google.com>
> Cc: Shakeel Butt <shakeelb@google.com>

Acked-by: Shakeel Butt <shakeelb@google.com>

Patch

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c823c35c2ed4..1e364ad495a3 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2275,7 +2275,7 @@  static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
-	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
+	if (memcg == READ_ONCE(stock->cached) && stock->nr_pages >= nr_pages) {
 		stock->nr_pages -= nr_pages;
 		ret = true;
 	}
@@ -2290,7 +2290,7 @@  static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
  */
 static void drain_stock(struct memcg_stock_pcp *stock)
 {
-	struct mem_cgroup *old = stock->cached;
+	struct mem_cgroup *old = READ_ONCE(stock->cached);
 
 	if (!old)
 		return;
@@ -2303,7 +2303,7 @@  static void drain_stock(struct memcg_stock_pcp *stock)
 	}
 
 	css_put(&old->css);
-	stock->cached = NULL;
+	WRITE_ONCE(stock->cached, NULL);
 }
 
 static void drain_local_stock(struct work_struct *dummy)
@@ -2338,10 +2338,10 @@  static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	struct memcg_stock_pcp *stock;
 
 	stock = this_cpu_ptr(&memcg_stock);
-	if (stock->cached != memcg) { /* reset if necessary */
+	if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
 		drain_stock(stock);
 		css_get(&memcg->css);
-		stock->cached = memcg;
+		WRITE_ONCE(stock->cached, memcg);
 	}
 	stock->nr_pages += nr_pages;
 
@@ -2383,7 +2383,7 @@  static void drain_all_stock(struct mem_cgroup *root_memcg)
 		bool flush = false;
 
 		rcu_read_lock();
-		memcg = stock->cached;
+		memcg = READ_ONCE(stock->cached);
 		if (memcg && stock->nr_pages &&
 		    mem_cgroup_is_descendant(memcg, root_memcg))
 			flush = true;