Message ID | 20211209150938.3518-4-dwmw2@infradead.org
---|---
State | New, archived
Series | Parallel CPU bringup for x86_64
On Thu, Dec 09, 2021 at 03:09:30PM +0000, David Woodhouse wrote:
> From: David Woodhouse <dwmw@amazon.co.uk>
> 
> As we handle parallel CPU bringup, we will need to take care to avoid
> spawning multiple boost threads, or race conditions when setting their
> affinity. Spotted by Paul McKenney.

And again, if testing goes well and you don't get it there first, I
expect to push this during the v5.18 merge window. In case you would
like to push this with the rest of this series:

Acked-by: Paul E. McKenney <paulmck@kernel.org>

> Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
> ---
>  kernel/rcu/tree.c        |  1 +
>  kernel/rcu/tree.h        |  3 +++
>  kernel/rcu/tree_plugin.h | 10 ++++++++--
>  3 files changed, 12 insertions(+), 2 deletions(-)
> 
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index a1bb0b1229ed..809855474b39 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -4527,6 +4527,7 @@ static void __init rcu_init_one(void)
>  		init_waitqueue_head(&rnp->exp_wq[2]);
>  		init_waitqueue_head(&rnp->exp_wq[3]);
>  		spin_lock_init(&rnp->exp_lock);
> +		mutex_init(&rnp->boost_kthread_mutex);
>  	}
>  }
> 
> diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
> index aff4cc9303fb..055e30b3e5e0 100644
> --- a/kernel/rcu/tree.h
> +++ b/kernel/rcu/tree.h
> @@ -108,6 +108,9 @@ struct rcu_node {
>  					/* side effect, not as a lock. */
>  	unsigned long boost_time;
>  					/* When to start boosting (jiffies). */
> +	struct mutex boost_kthread_mutex;
> +					/* Exclusion for thread spawning and affinity */
> +					/* manipulation. */
>  	struct task_struct *boost_kthread_task;
>  					/* kthread that takes care of priority */
>  					/* boosting for this rcu_node structure. */
> diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
> index 5199559fbbf0..3b4ee0933710 100644
> --- a/kernel/rcu/tree_plugin.h
> +++ b/kernel/rcu/tree_plugin.h
> @@ -1162,15 +1162,16 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
>  	struct sched_param sp;
>  	struct task_struct *t;
> 
> +	mutex_lock(&rnp->boost_kthread_mutex);
>  	if (rnp->boost_kthread_task || !rcu_scheduler_fully_active)
> -		return;
> +		goto out;
> 
>  	rcu_state.boost = 1;
> 
>  	t = kthread_create(rcu_boost_kthread, (void *)rnp,
>  			   "rcub/%d", rnp_index);
>  	if (WARN_ON_ONCE(IS_ERR(t)))
> -		return;
> +		goto out;
> 
>  	raw_spin_lock_irqsave_rcu_node(rnp, flags);
>  	rnp->boost_kthread_task = t;
> @@ -1178,6 +1179,9 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
>  	sp.sched_priority = kthread_prio;
>  	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
>  	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
> +
> + out:
> +	mutex_unlock(&rnp->boost_kthread_mutex);
>  }
> 
>  /*
> @@ -1200,6 +1204,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
>  		return;
>  	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
>  		return;
> +	mutex_lock(&rnp->boost_kthread_mutex);
>  	for_each_leaf_node_possible_cpu(rnp, cpu)
>  		if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
>  		    cpu != outgoingcpu)
> @@ -1207,6 +1212,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
>  	if (cpumask_weight(cm) == 0)
>  		cpumask_setall(cm);
>  	set_cpus_allowed_ptr(t, cm);
> +	mutex_unlock(&rnp->boost_kthread_mutex);
>  	free_cpumask_var(cm);
>  }
> 
> -- 
> 2.31.1
> 
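For readers following along outside the kernel tree, here is a minimal, self-contained sketch of the serialization pattern this patch introduces: a per-node mutex that makes "spawn the boost kthread exactly once" and "update its affinity" mutually exclusive, so two CPUs brought up in parallel cannot both observe an unspawned thread and create it twice, nor interleave affinity updates. This is plain pthreads user-space code, not kernel code; the names node, spawn_boost_thread and set_boost_affinity are illustrative stand-ins for rcu_node, rcu_spawn_one_boost_kthread() and rcu_boost_kthread_setaffinity().

```c
/*
 * Toy model of the pattern in the patch, not kernel code.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	pthread_mutex_t boost_mutex;	/* plays the role of boost_kthread_mutex */
	pthread_t boost_thread;
	bool boost_thread_started;
	unsigned long affinity_mask;	/* stand-in for the kthread's cpumask */
};

static void *boost_thread_fn(void *arg)
{
	(void)arg;
	return NULL;	/* the real kthread would loop doing priority boosting */
}

/* Called once per CPU as it comes online; must create the thread only once. */
static void spawn_boost_thread(struct node *n)
{
	pthread_mutex_lock(&n->boost_mutex);
	if (n->boost_thread_started)
		goto out;	/* another CPU already spawned it */
	if (pthread_create(&n->boost_thread, NULL, boost_thread_fn, n) == 0)
		n->boost_thread_started = true;
out:
	pthread_mutex_unlock(&n->boost_mutex);
}

/* Recompute the thread's allowed CPUs; serialized by the same mutex. */
static void set_boost_affinity(struct node *n, unsigned long online_mask)
{
	pthread_mutex_lock(&n->boost_mutex);
	if (n->boost_thread_started)
		n->affinity_mask = online_mask;	/* stand-in for set_cpus_allowed_ptr() */
	pthread_mutex_unlock(&n->boost_mutex);
}

int main(void)
{
	struct node n = { .boost_mutex = PTHREAD_MUTEX_INITIALIZER };

	/* Simulate two CPUs coming up; the second spawn is a harmless no-op. */
	spawn_boost_thread(&n);
	spawn_boost_thread(&n);
	set_boost_affinity(&n, 0x3);

	printf("started=%d affinity=0x%lx\n", n.boost_thread_started, n.affinity_mask);
	pthread_join(n.boost_thread, NULL);
	return 0;
}
```

Without the mutex, both callers could see boost_thread_started as false and each create a thread; with it, the check and the creation are one atomic step, which is exactly what taking rnp->boost_kthread_mutex buys the kernel code above.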