
[v4,1/2] rcu: Eliminate rcu_state.nocb_is_setup

Message ID 20220817014253.1982-2-thunder.leizhen@huawei.com (mailing list archive)
State Superseded
Series rcu/nocb: Delete local variable 'need_rcu_nocb_mask' in rcu_init_nohz()

Commit Message

Leizhen (ThunderTown) Aug. 17, 2022, 1:42 a.m. UTC
'rcu_state.nocb_is_setup' is set to true only after memory for 'rcu_nocb_mask'
has been successfully allocated, so it can be replaced by
'cpumask_available(rcu_nocb_mask)'. More importantly, the latter is more
intuitive and is already used in several places.

Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
---
 kernel/rcu/tree.h      | 1 -
 kernel/rcu/tree_nocb.h | 8 +++-----
 2 files changed, 3 insertions(+), 6 deletions(-)

Comments

Paul E. McKenney Aug. 22, 2022, 4:34 p.m. UTC | #1
On Wed, Aug 17, 2022 at 09:42:52AM +0800, Zhen Lei wrote:
> 'rcu_state.nocb_is_setup' is set to true only after memory for 'rcu_nocb_mask'
> has been successfully allocated, so it can be replaced by
> 'cpumask_available(rcu_nocb_mask)'. More importantly, the latter is more
> intuitive and is already used in several places.
> 
> Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>

One of the implementations of cpumask_available() does indeed check
for NULL.  But here is the other one:

static inline bool cpumask_available(cpumask_var_t mask)
{
	return true;
}

So I have to ask...  In a kernel built with CONFIG_CPUMASK_OFFSTACK=n,
will this change really work?
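
For context, the behavioral difference comes from the two definitions of
cpumask_var_t. Roughly, from include/linux/cpumask.h (exact details vary by
kernel version):

	/* CONFIG_CPUMASK_OFFSTACK=y: cpumask_var_t is a pointer that must be
	 * allocated, so "available" really means "allocation succeeded". */
	typedef struct cpumask *cpumask_var_t;

	static inline bool cpumask_available(cpumask_var_t mask)
	{
		return mask != NULL;
	}

	/* CONFIG_CPUMASK_OFFSTACK=n: cpumask_var_t is a fixed array, nothing
	 * is ever allocated, and cpumask_available() is compile-time true. */
	typedef struct cpumask cpumask_var_t[1];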

Another important question is "Do all of the existing uses of
cpumask_available() really work?"  Yes, I do believe that they do
work, but it would be good to get another set of eyes on that code.
"All software developers are blind!"  ;-)

							Thanx, Paul

> ---
>  kernel/rcu/tree.h      | 1 -
>  kernel/rcu/tree_nocb.h | 8 +++-----
>  2 files changed, 3 insertions(+), 6 deletions(-)
> 
> diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
> index d4a97e40ea9c3e2..06f659c63d2d192 100644
> --- a/kernel/rcu/tree.h
> +++ b/kernel/rcu/tree.h
> @@ -375,7 +375,6 @@ struct rcu_state {
>  	arch_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
>  						/* Synchronize offline with */
>  						/*  GP pre-initialization. */
> -	int nocb_is_setup;			/* nocb is setup from boot */
>  };
>  
>  /* Values for rcu_state structure's gp_flags field. */
> diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
> index 0a5f0ef41484518..ff763e7dc53551f 100644
> --- a/kernel/rcu/tree_nocb.h
> +++ b/kernel/rcu/tree_nocb.h
> @@ -69,7 +69,6 @@ static int __init rcu_nocb_setup(char *str)
>  			cpumask_setall(rcu_nocb_mask);
>  		}
>  	}
> -	rcu_state.nocb_is_setup = true;
>  	return 1;
>  }
>  __setup("rcu_nocbs", rcu_nocb_setup);
> @@ -1215,7 +1214,7 @@ void __init rcu_init_nohz(void)
>  	struct rcu_data *rdp;
>  
>  #if defined(CONFIG_RCU_NOCB_CPU_DEFAULT_ALL)
> -	if (!rcu_state.nocb_is_setup) {
> +	if (!cpumask_available(rcu_nocb_mask)) {
>  		need_rcu_nocb_mask = true;
>  		offload_all = true;
>  	}
> @@ -1235,10 +1234,9 @@ void __init rcu_init_nohz(void)
>  				return;
>  			}
>  		}
> -		rcu_state.nocb_is_setup = true;
>  	}
>  
> -	if (!rcu_state.nocb_is_setup)
> +	if (!cpumask_available(rcu_nocb_mask))
>  		return;
>  
>  #if defined(CONFIG_NO_HZ_FULL)
> @@ -1299,7 +1297,7 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu)
>  	struct task_struct *t;
>  	struct sched_param sp;
>  
> -	if (!rcu_scheduler_fully_active || !rcu_state.nocb_is_setup)
> +	if (!rcu_scheduler_fully_active || !cpumask_available(rcu_nocb_mask))
>  		return;
>  
>  	/* If there already is an rcuo kthread, then nothing to do. */
> -- 
> 2.25.1
>
Leizhen (ThunderTown) Aug. 23, 2022, noon UTC | #2
On 2022/8/23 0:34, Paul E. McKenney wrote:
> On Wed, Aug 17, 2022 at 09:42:52AM +0800, Zhen Lei wrote:
>> 'rcu_state.nocb_is_setup' is set to true only after memory for 'rcu_nocb_mask'
>> has been successfully allocated, so it can be replaced by
>> 'cpumask_available(rcu_nocb_mask)'. More importantly, the latter is more
>> intuitive and is already used in several places.
>>
>> Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
> 
> One of the implementations of cpumask_available() does indeed check
> for NULL.  But here is the other one:
> 
> static inline bool cpumask_available(cpumask_var_t mask)
> {
> 	return true;
> }

Thanks for the heads-up.

> 
> So I have to ask...  In a kernel built with CONFIG_CPUMASK_OFFSTACK=n,
> will this change really work?

Yes, I ran the test cases on arm64, which does not enable
CONFIG_CPUMASK_OFFSTACK by default.

I've listed a combination of build options in the cover letter. In that
configuration, cpumask_empty(rcu_nocb_mask) is true.
----
CONFIG_NO_HZ_FULL=n, CONFIG_RCU_NOCB_CPU_DEFAULT_ALL=n, cmdline without rcu_nocbs
[    0.000000] rcu:     Offload RCU callbacks from CPUs: (none).
----

> 
> Another important question is "Do all of the existing uses of
> cpumask_available() really work?"  Yes, I do believe that they do

The only functional change caused by this patch is the following.
(For clarity, CONFIG_RCU_NOCB_CPU=y and CONFIG_RCU_NOCB_CPU_DEFAULT_ALL=n are
assumed and omitted from the conditions below.)

When
  CONFIG_NO_HZ_FULL=n and the boot cmdline has no 'rcu_nocbs=',
or
  CONFIG_NO_HZ_FULL=y and the boot cmdline has no 'nohz_full=',

the rdp->nocb_gp_kthread and rdp->nocb_cb_kthread kthreads are still created.
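
Concretely, the check in question is the one from the diff above, in
rcu_spawn_cpu_nocb_kthread():

	/* With CONFIG_CPUMASK_OFFSTACK=n, cpumask_available() is always true,
	 * so this no longer filters out the "nocb never set up" case and the
	 * rcuog/rcuo kthreads get created anyway. */
	if (!rcu_scheduler_fully_active || !cpumask_available(rcu_nocb_mask))
		return;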

But we have provided the EXPORT_SYMBOL_GPL functions rcu_nocb_cpu_deoffload()
and rcu_nocb_cpu_offload(), which can dynamically modify 'rcu_nocb_mask', so it
seems appropriate to prepare these kthreads in advance. Of course, it looks like
only 'rcutorture' uses these two functions at the moment.
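
As a rough sketch of how such a caller might toggle offloading at run time
(the helper name and CPU number below are made up for illustration, and the
prototypes are assumed to be the int-returning ones declared under
CONFIG_RCU_NOCB_CPU in <linux/rcupdate.h>):

	#include <linux/rcupdate.h>

	/* Hypothetical example: offload CPU 3's callbacks, then hand them back.
	 * Both helpers are expected to return 0 on success, negative errno
	 * otherwise. */
	static int example_toggle_nocb(void)
	{
		int ret;

		ret = rcu_nocb_cpu_offload(3);
		if (ret)
			return ret;

		return rcu_nocb_cpu_deoffload(3);
	}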

Otherwise, we could add an optimization: if none of the CPUs in a 'nocb_gp'
group is set in rcu_nocb_mask, that group does not need to create its
corresponding "rcuog/%d" and "rcuo%c/%d" kthreads.

And in rcu_init_nohz():
-       if (!rcu_state.nocb_is_setup)
+       if (!cpumask_available(rcu_nocb_mask) || cpumask_empty(rcu_nocb_mask))
                return;



> work, but it would be good to get another set of eyes on that code.
> "All software developers are blind!"  ;-)
> 
> 							Thanx, Paul
> 
>> ---
>>  kernel/rcu/tree.h      | 1 -
>>  kernel/rcu/tree_nocb.h | 8 +++-----
>>  2 files changed, 3 insertions(+), 6 deletions(-)
>>
>> diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
>> index d4a97e40ea9c3e2..06f659c63d2d192 100644
>> --- a/kernel/rcu/tree.h
>> +++ b/kernel/rcu/tree.h
>> @@ -375,7 +375,6 @@ struct rcu_state {
>>  	arch_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
>>  						/* Synchronize offline with */
>>  						/*  GP pre-initialization. */
>> -	int nocb_is_setup;			/* nocb is setup from boot */
>>  };
>>  
>>  /* Values for rcu_state structure's gp_flags field. */
>> diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
>> index 0a5f0ef41484518..ff763e7dc53551f 100644
>> --- a/kernel/rcu/tree_nocb.h
>> +++ b/kernel/rcu/tree_nocb.h
>> @@ -69,7 +69,6 @@ static int __init rcu_nocb_setup(char *str)
>>  			cpumask_setall(rcu_nocb_mask);
>>  		}
>>  	}
>> -	rcu_state.nocb_is_setup = true;
>>  	return 1;
>>  }
>>  __setup("rcu_nocbs", rcu_nocb_setup);
>> @@ -1215,7 +1214,7 @@ void __init rcu_init_nohz(void)
>>  	struct rcu_data *rdp;
>>  
>>  #if defined(CONFIG_RCU_NOCB_CPU_DEFAULT_ALL)
>> -	if (!rcu_state.nocb_is_setup) {
>> +	if (!cpumask_available(rcu_nocb_mask)) {
>>  		need_rcu_nocb_mask = true;
>>  		offload_all = true;
>>  	}
>> @@ -1235,10 +1234,9 @@ void __init rcu_init_nohz(void)
>>  				return;
>>  			}
>>  		}
>> -		rcu_state.nocb_is_setup = true;
>>  	}
>>  
>> -	if (!rcu_state.nocb_is_setup)
>> +	if (!cpumask_available(rcu_nocb_mask))
>>  		return;
>>  
>>  #if defined(CONFIG_NO_HZ_FULL)
>> @@ -1299,7 +1297,7 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu)
>>  	struct task_struct *t;
>>  	struct sched_param sp;
>>  
>> -	if (!rcu_scheduler_fully_active || !rcu_state.nocb_is_setup)
>> +	if (!rcu_scheduler_fully_active || !cpumask_available(rcu_nocb_mask))
>>  		return;
>>  
>>  	/* If there already is an rcuo kthread, then nothing to do. */
>> -- 
>> 2.25.1
>>
> .
>

Patch

diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index d4a97e40ea9c3e2..06f659c63d2d192 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -375,7 +375,6 @@ struct rcu_state {
 	arch_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
 						/* Synchronize offline with */
 						/*  GP pre-initialization. */
-	int nocb_is_setup;			/* nocb is setup from boot */
 };
 
 /* Values for rcu_state structure's gp_flags field. */
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index 0a5f0ef41484518..ff763e7dc53551f 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -69,7 +69,6 @@ static int __init rcu_nocb_setup(char *str)
 			cpumask_setall(rcu_nocb_mask);
 		}
 	}
-	rcu_state.nocb_is_setup = true;
 	return 1;
 }
 __setup("rcu_nocbs", rcu_nocb_setup);
@@ -1215,7 +1214,7 @@ void __init rcu_init_nohz(void)
 	struct rcu_data *rdp;
 
 #if defined(CONFIG_RCU_NOCB_CPU_DEFAULT_ALL)
-	if (!rcu_state.nocb_is_setup) {
+	if (!cpumask_available(rcu_nocb_mask)) {
 		need_rcu_nocb_mask = true;
 		offload_all = true;
 	}
@@ -1235,10 +1234,9 @@ void __init rcu_init_nohz(void)
 				return;
 			}
 		}
-		rcu_state.nocb_is_setup = true;
 	}
 
-	if (!rcu_state.nocb_is_setup)
+	if (!cpumask_available(rcu_nocb_mask))
 		return;
 
 #if defined(CONFIG_NO_HZ_FULL)
@@ -1299,7 +1297,7 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu)
 	struct task_struct *t;
 	struct sched_param sp;
 
-	if (!rcu_scheduler_fully_active || !rcu_state.nocb_is_setup)
+	if (!rcu_scheduler_fully_active || !cpumask_available(rcu_nocb_mask))
 		return;
 
 	/* If there already is an rcuo kthread, then nothing to do. */