diff mbox series

[4/9] memcg: manually inline __refill_stock

Message ID 20250315174930.1769599-5-shakeel.butt@linux.dev (mailing list archive)
State New
Headers show
Series memcg: cleanup per-cpu stock | expand

Commit Message

Shakeel Butt March 15, 2025, 5:49 p.m. UTC
__refill_stock() no longer has multiple callers, so simply inline
it into refill_stock().

Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
---
 mm/memcontrol.c | 32 ++++++++++++--------------------
 1 file changed, 12 insertions(+), 20 deletions(-)

Comments

Roman Gushchin March 18, 2025, 12:58 a.m. UTC | #1
On Sat, Mar 15, 2025 at 10:49:25AM -0700, Shakeel Butt wrote:
> There are no more multiple callers of __refill_stock(), so simply inline
> it to refill_stock().
> 
> Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
> Acked-by: Vlastimil Babka <vbabka@suse.cz>
> ---
>  mm/memcontrol.c | 32 ++++++++++++--------------------
>  1 file changed, 12 insertions(+), 20 deletions(-)
> 
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index b54e3a1d23bd..7054b0ebd207 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -1865,14 +1865,21 @@ static void drain_local_stock(struct work_struct *dummy)
>  	obj_cgroup_put(old);
>  }
>  
> -/*
> - * Cache charges(val) to local per_cpu area.
> - * This will be consumed by consume_stock() function, later.
> - */
> -static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
> +/* Should never be called with root_mem_cgroup. */

How about adding something like this?

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 768d6b15dbfa..5c26002f2168 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1881,6 +1881,8 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
        unsigned long flags;

+       VM_WARN_ON_ONCE(mem_cgroup_is_root(memcg));
+
        local_lock_irqsave(&memcg_stock.stock_lock, flags);
        __refill_stock(memcg, nr_pages);
        local_unlock_irqrestore(&memcg_stock.stock_lock, flags);


Other than that,
Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>
Vlastimil Babka March 18, 2025, 7:58 a.m. UTC | #2
On 3/18/25 01:58, Roman Gushchin wrote:
> On Sat, Mar 15, 2025 at 10:49:25AM -0700, Shakeel Butt wrote:
>> There are no more multiple callers of __refill_stock(), so simply inline
>> it to refill_stock().
>> 
>> Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
>> Acked-by: Vlastimil Babka <vbabka@suse.cz>
>> ---
>>  mm/memcontrol.c | 32 ++++++++++++--------------------
>>  1 file changed, 12 insertions(+), 20 deletions(-)
>> 
>> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
>> index b54e3a1d23bd..7054b0ebd207 100644
>> --- a/mm/memcontrol.c
>> +++ b/mm/memcontrol.c
>> @@ -1865,14 +1865,21 @@ static void drain_local_stock(struct work_struct *dummy)
>>  	obj_cgroup_put(old);
>>  }
>>  
>> -/*
>> - * Cache charges(val) to local per_cpu area.
>> - * This will be consumed by consume_stock() function, later.
>> - */
>> -static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
>> +/* Should never be called with root_mem_cgroup. */
> 
> How about adding something like this?
> 
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 768d6b15dbfa..5c26002f2168 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -1881,6 +1881,8 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
>  {
>         unsigned long flags;
> 
> +       VM_WARN_ON_ONCE(mem_cgroup_is_root(memcg));

Already in patch 1 though? But I agree.

> +
>         local_lock_irqsave(&memcg_stock.stock_lock, flags);
>         __refill_stock(memcg, nr_pages);
>         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
> 
> 
> Other than that,
> Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>
diff mbox series

Patch

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b54e3a1d23bd..7054b0ebd207 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1865,14 +1865,21 @@  static void drain_local_stock(struct work_struct *dummy)
 	obj_cgroup_put(old);
 }
 
-/*
- * Cache charges(val) to local per_cpu area.
- * This will be consumed by consume_stock() function, later.
- */
-static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+/* Should never be called with root_mem_cgroup. */
+static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
 	struct memcg_stock_pcp *stock;
 	unsigned int stock_pages;
+	unsigned long flags;
+
+	if (!localtry_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
+		/*
+		 * In case of unlikely failure to lock percpu stock_lock
+		 * uncharge memcg directly.
+		 */
+		memcg_uncharge(memcg, nr_pages);
+		return;
+	}
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
@@ -1885,22 +1892,7 @@  static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 
 	if (stock_pages > MEMCG_CHARGE_BATCH)
 		drain_stock(stock);
-}
 
-/* Should never be called with root_mem_cgroup. */
-static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
-{
-	unsigned long flags;
-
-	if (!localtry_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
-		/*
-		 * In case of unlikely failure to lock percpu stock_lock
-		 * uncharge memcg directly.
-		 */
-		memcg_uncharge(memcg, nr_pages);
-		return;
-	}
-	__refill_stock(memcg, nr_pages);
 	localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 }