
[PATCH-next,v2] mm/memcg: Properly handle memcg_stock access for PREEMPT_RT

Message ID 20211210025228.158196-1-longman@redhat.com (mailing list archive)
State New
Series [PATCH-next,v2] mm/memcg: Properly handle memcg_stock access for PREEMPT_RT

Commit Message

Waiman Long Dec. 10, 2021, 2:52 a.m. UTC
Direct calls to local_irq_{save/restore}() and preempt_{enable/disable}()
are not appropriate for PREEMPT_RT. To provide better PREEMPT_RT support,
change local_irq_save()/local_irq_restore() to
local_lock_irqsave()/local_unlock_irqrestore() and add a local_lock_t to
struct memcg_stock_pcp.

Also disable the task and interrupt context optimization for obj_stock as
there will be no performance gain in the case of PREEMPT_RT. In this case,
the task obj_stock will still be allocated but remain unused.

Note that preempt_enable() and preempt_disable() in get_obj_stock() and
put_obj_stock() are not replaced by local_lock() and local_unlock() because
a task accessing task_obj may be interrupted, with the interrupt handler
then accessing irq_obj concurrently. Using local_lock() for the task_obj
access could therefore cause a lockdep splat.
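
To illustrate (a sketch only, not part of the patch): if the task_obj path
took the same per-CPU lock, e.g.

static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
{
	if (likely(in_task())) {
		*pflags = 0UL;
		local_lock(&memcg_stock.lock);	/* hypothetical */
		return this_cpu_ptr(&memcg_stock.task_obj);
	}
	local_lock_irqsave(&memcg_stock.lock, *pflags);
	return this_cpu_ptr(&memcg_stock.irq_obj);
}

then on a non-PREEMPT_RT kernel an interrupt arriving inside the task_obj
critical section (which is taken without disabling interrupts) could acquire
the same lock class again via the irq_obj path, and lockdep would flag that
as a potential deadlock.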

Signed-off-by: Waiman Long <longman@redhat.com>
---
 mm/memcontrol.c | 41 ++++++++++++++++++++++-------------------
 1 file changed, 22 insertions(+), 19 deletions(-)

Comments

Sebastian Andrzej Siewior Dec. 10, 2021, 1:01 p.m. UTC | #1
On 2021-12-09 21:52:28 [-0500], Waiman Long wrote:
…
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
…
> @@ -2210,7 +2211,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
>  	struct memcg_stock_pcp *stock;
>  	unsigned long flags;
>  
> -	local_irq_save(flags);
> +	local_lock_irqsave(&memcg_stock.lock, flags);

Why is this one using the lock? It isn't accessing irq_obj, right?

>  	stock = this_cpu_ptr(&memcg_stock);
>  	if (stock->cached != memcg) { /* reset if necessary */
> @@ -2779,29 +2780,28 @@ static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
>   * which is cheap in non-preempt kernel. The interrupt context object stock
>   * can only be accessed after disabling interrupt. User context code can
>   * access interrupt object stock, but not vice versa.
> + *
> + * This task and interrupt context optimization is disabled for PREEMPT_RT
> + * as there is no performance gain in this case.
>   */
>  static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
>  {
> -	struct memcg_stock_pcp *stock;
> -
> -	if (likely(in_task())) {
> +	if (likely(in_task()) && !IS_ENABLED(CONFIG_PREEMPT_RT)) {
>  		*pflags = 0UL;
>  		preempt_disable();
> -		stock = this_cpu_ptr(&memcg_stock);
> -		return &stock->task_obj;
> +		return this_cpu_ptr(&memcg_stock.task_obj);
>  	}

We usually add the local_lock_t to the object it protects, struct
obj_stock in this case.
That would give you two different locks (instead of one) so you wouldn't
have to use preempt_disable() to avoid lockdep's complaints. Also it
would warn you if you happen to use that obj_stock in !in_task(), which
isn't possible now.
The only downside would be that drain_local_stock() needs to acquire two
locks.
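
Roughly (just a sketch of the idea, untested):

struct obj_stock {
	/* existing fields */
	local_lock_t lock;	/* protects only this obj_stock */
};

static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
{
	if (likely(in_task())) {
		*pflags = 0UL;
		local_lock(&memcg_stock.task_obj.lock);
		return this_cpu_ptr(&memcg_stock.task_obj);
	}
	local_lock_irqsave(&memcg_stock.irq_obj.lock, *pflags);
	return this_cpu_ptr(&memcg_stock.irq_obj);
}

with put_obj_stock() doing the matching local_unlock()/local_unlock_irqrestore(),
and drain_local_stock() taking both obj_stock locks in addition to whatever
protects cached/nr_pages.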

>  
> -	local_irq_save(*pflags);
> -	stock = this_cpu_ptr(&memcg_stock);
> -	return &stock->irq_obj;
> +	local_lock_irqsave(&memcg_stock.lock, *pflags);
> +	return this_cpu_ptr(&memcg_stock.irq_obj);
>  }
>  
>  static inline void put_obj_stock(unsigned long flags)
>  {
> -	if (likely(in_task()))
> +	if (likely(in_task()) && !IS_ENABLED(CONFIG_PREEMPT_RT))
>  		preempt_enable();
>  	else
> -		local_irq_restore(flags);
> +		local_unlock_irqrestore(&memcg_stock.lock, flags);
>  }
>  
>  /*

Sebastian
Waiman Long Dec. 10, 2021, 4:29 p.m. UTC | #2
On 12/10/21 08:01, Sebastian Andrzej Siewior wrote:
> On 2021-12-09 21:52:28 [-0500], Waiman Long wrote:
> …
>> --- a/mm/memcontrol.c
>> +++ b/mm/memcontrol.c
> …
>> @@ -2210,7 +2211,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
>>   	struct memcg_stock_pcp *stock;
>>   	unsigned long flags;
>>   
>> -	local_irq_save(flags);
>> +	local_lock_irqsave(&memcg_stock.lock, flags);
> Why is this one using the lock? It isn't accessing irq_obj, right?
Well, the lock isn't just for irq_obj. It protects the whole memcg_stock 
structure, which includes irq_obj. Sometimes, data in irq_obj (or 
task_obj) will get transferred to nr_pages and vice versa. So it is easier 
to use one single lock for the whole thing.
>
>>   	stock = this_cpu_ptr(&memcg_stock);
>>   	if (stock->cached != memcg) { /* reset if necessary */
>> @@ -2779,29 +2780,28 @@ static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
>>    * which is cheap in non-preempt kernel. The interrupt context object stock
>>    * can only be accessed after disabling interrupt. User context code can
>>    * access interrupt object stock, but not vice versa.
>> + *
>> + * This task and interrupt context optimization is disabled for PREEMPT_RT
>> + * as there is no performance gain in this case.
>>    */
>>   static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
>>   {
>> -	struct memcg_stock_pcp *stock;
>> -
>> -	if (likely(in_task())) {
>> +	if (likely(in_task()) && !IS_ENABLED(CONFIG_PREEMPT_RT)) {
>>   		*pflags = 0UL;
>>   		preempt_disable();
>> -		stock = this_cpu_ptr(&memcg_stock);
>> -		return &stock->task_obj;
>> +		return this_cpu_ptr(&memcg_stock.task_obj);
>>   	}
> We usually add the local_lock_t to the object it protects, struct
> obj_stock in this case.
> That would give you two different locks (instead of one) so you wouldn't
> have to use preempt_disable() to avoid lockdep's complaints. Also it
> would warn you if you happen to use that obj_stock in !in_task(), which
> isn't possible now.
> The only downside would be that drain_local_stock() needs to acquire two
> locks.
>
As said above, having separate locks will complicate the interaction 
between irq_obj and the broader memcg_stock fields. Besides, throughput 
is a less important metric for PREEMPT_RT, so I am not trying to 
optimize throughput performance for PREEMPT_RT here.

Cheers,
Longman
Sebastian Andrzej Siewior Dec. 10, 2021, 4:34 p.m. UTC | #3
On 2021-12-10 11:29:31 [-0500], Waiman Long wrote:
> 
> On 12/10/21 08:01, Sebastian Andrzej Siewior wrote:
> > On 2021-12-09 21:52:28 [-0500], Waiman Long wrote:
> > …
> > > --- a/mm/memcontrol.c
> > > +++ b/mm/memcontrol.c
> > …
> > > @@ -2210,7 +2211,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
> > >   	struct memcg_stock_pcp *stock;
> > >   	unsigned long flags;
> > > -	local_irq_save(flags);
> > > +	local_lock_irqsave(&memcg_stock.lock, flags);
> > Why is this one using the lock? It isn't accessing irq_obj, right?
> Well, the lock isn't just for irq_obj. It protects the whole memcg_stock
> structure, which includes irq_obj. Sometimes, data in irq_obj (or task_obj)
> will get transferred to nr_pages and vice versa. So it is easier to use one
> single lock for the whole thing.

This needs way better documentation of what protects what and why.
I don't like quickly slapping this on for RT-only usage without any kind
of explanation. Once you think you know it is irq_obj only, you end up
here where you have the lock again for no obvious reason.

> 
> Cheers,
> Longman

Sebastian
Waiman Long Dec. 10, 2021, 4:37 p.m. UTC | #4
On 12/10/21 11:34, Sebastian Andrzej Siewior wrote:
> On 2021-12-10 11:29:31 [-0500], Waiman Long wrote:
>> On 12/10/21 08:01, Sebastian Andrzej Siewior wrote:
>>> On 2021-12-09 21:52:28 [-0500], Waiman Long wrote:
>>> …
>>>> --- a/mm/memcontrol.c
>>>> +++ b/mm/memcontrol.c
>>> …
>>>> @@ -2210,7 +2211,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
>>>>    	struct memcg_stock_pcp *stock;
>>>>    	unsigned long flags;
>>>> -	local_irq_save(flags);
>>>> +	local_lock_irqsave(&memcg_stock.lock, flags);
>>> Why is this one using the lock? It isn't accessing irq_obj, right?
>> Well, the lock isn't just for irq_obj. It protects the whole memcg_stock
>> structure, which includes irq_obj. Sometimes, data in irq_obj (or task_obj)
>> will get transferred to nr_pages and vice versa. So it is easier to use one
>> single lock for the whole thing.
> This needs way better documentation of what protects what and why.
> I don't like quickly slapping this on for RT-only usage without any kind of
> explanation. Once you think you know it is irq_obj only, you end up here
> where you have the lock again for no obvious reason.

Sure, I will update the patch description and add comments to document 
that; a rough sketch of what I have in mind is below. Let's see what other 
feedback I get before making the update.
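
Something along these lines, perhaps (wording still tentative, not in the
posted patch):

struct memcg_stock_pcp {
	/*
	 * Protects the whole per-cpu structure, not just irq_obj: the
	 * page stock (cached, nr_pages) as well as both obj_stocks,
	 * since bytes cached in an obj_stock may be converted into
	 * pages and folded back into nr_pages (and vice versa).
	 */
	local_lock_t lock;
	struct mem_cgroup *cached; /* this never be root cgroup */
	unsigned int nr_pages;
	struct obj_stock task_obj;
	struct obj_stock irq_obj;
	/* remaining fields unchanged */
};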

Cheers,
Longman

Patch

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a09a7d2e0b1b..8bed8e2993e4 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2097,6 +2097,7 @@  struct obj_stock {
 };
 
 struct memcg_stock_pcp {
+	local_lock_t lock;
 	struct mem_cgroup *cached; /* this never be root cgroup */
 	unsigned int nr_pages;
 	struct obj_stock task_obj;
@@ -2145,7 +2146,7 @@  static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	if (nr_pages > MEMCG_CHARGE_BATCH)
 		return ret;
 
-	local_irq_save(flags);
+	local_lock_irqsave(&memcg_stock.lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
@@ -2153,7 +2154,7 @@  static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 		ret = true;
 	}
 
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&memcg_stock.lock, flags);
 
 	return ret;
 }
@@ -2189,7 +2190,7 @@  static void drain_local_stock(struct work_struct *dummy)
 	 * drain_stock races is that we always operate on local CPU stock
 	 * here with IRQ disabled
 	 */
-	local_irq_save(flags);
+	local_lock_irqsave(&memcg_stock.lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	drain_obj_stock(&stock->irq_obj);
@@ -2198,7 +2199,7 @@  static void drain_local_stock(struct work_struct *dummy)
 	drain_stock(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&memcg_stock.lock, flags);
 }
 
 /*
@@ -2210,7 +2211,7 @@  static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	struct memcg_stock_pcp *stock;
 	unsigned long flags;
 
-	local_irq_save(flags);
+	local_lock_irqsave(&memcg_stock.lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (stock->cached != memcg) { /* reset if necessary */
@@ -2223,7 +2224,7 @@  static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
 		drain_stock(stock);
 
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&memcg_stock.lock, flags);
 }
 
 /*
@@ -2779,29 +2780,28 @@  static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
  * which is cheap in non-preempt kernel. The interrupt context object stock
  * can only be accessed after disabling interrupt. User context code can
  * access interrupt object stock, but not vice versa.
+ *
+ * This task and interrupt context optimization is disabled for PREEMPT_RT
+ * as there is no performance gain in this case.
  */
 static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
 {
-	struct memcg_stock_pcp *stock;
-
-	if (likely(in_task())) {
+	if (likely(in_task()) && !IS_ENABLED(CONFIG_PREEMPT_RT)) {
 		*pflags = 0UL;
 		preempt_disable();
-		stock = this_cpu_ptr(&memcg_stock);
-		return &stock->task_obj;
+		return this_cpu_ptr(&memcg_stock.task_obj);
 	}
 
-	local_irq_save(*pflags);
-	stock = this_cpu_ptr(&memcg_stock);
-	return &stock->irq_obj;
+	local_lock_irqsave(&memcg_stock.lock, *pflags);
+	return this_cpu_ptr(&memcg_stock.irq_obj);
 }
 
 static inline void put_obj_stock(unsigned long flags)
 {
-	if (likely(in_task()))
+	if (likely(in_task()) && !IS_ENABLED(CONFIG_PREEMPT_RT))
 		preempt_enable();
 	else
-		local_irq_restore(flags);
+		local_unlock_irqrestore(&memcg_stock.lock, flags);
 }
 
 /*
@@ -7088,9 +7088,12 @@  static int __init mem_cgroup_init(void)
 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
 				  memcg_hotplug_cpu_dead);
 
-	for_each_possible_cpu(cpu)
-		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
-			  drain_local_stock);
+	for_each_possible_cpu(cpu) {
+		struct memcg_stock_pcp *stock = per_cpu_ptr(&memcg_stock, cpu);
+
+		INIT_WORK(&stock->work, drain_local_stock);
+		local_lock_init(&stock->lock);
+	}
 
 	for_each_node(node) {
 		struct mem_cgroup_tree_per_node *rtpn;