
[v3,08/57] sched: Simplify wake_up_if_idle()

Message ID: 20230612093537.977924652@infradead.org (mailing list archive)
State: Handled Elsewhere
Delegated to: Paul Moore
Series: Scope-based Resource Management

Commit Message

Peter Zijlstra June 12, 2023, 9:07 a.m. UTC
Use guards to reduce gotos and simplify control flow.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 kernel/sched/core.c  |   20 ++++++--------------
 kernel/sched/sched.h |   15 +++++++++++++++
 2 files changed, 21 insertions(+), 14 deletions(-)
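
[Editor's note: the guards used here come from the cleanup.h infrastructure
added earlier in this series; they ride on the compiler's scope-based cleanup
attribute. As a rough, self-contained userspace illustration of the underlying
mechanism only (pthreads stand in for the kernel locking primitives; this is
not the kernel macro expansion):

/*
 * A variable with a cleanup attribute has its destructor run at every
 * scope exit, so early returns need no goto/unlock boilerplate.
 */
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

static void critical_section(int bail_early)
{
	/* "guard": locked here, unlocked automatically at scope exit */
	pthread_mutex_t *guard __attribute__((cleanup(unlock_cleanup))) = &lock;
	pthread_mutex_lock(guard);

	if (bail_early)
		return;		/* unlock_cleanup() still runs */

	puts("did the work");
}				/* ...and it runs here on the normal path too */

int main(void)
{
	critical_section(0);
	critical_section(1);
	return 0;
}
]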

Comments

Boqun Feng June 12, 2023, 6:02 p.m. UTC | #1
On Mon, Jun 12, 2023 at 11:07:21AM +0200, Peter Zijlstra wrote:
> Use guards to reduce gotos and simplify control flow.
> 
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> ---
>  kernel/sched/core.c  |   20 ++++++--------------
>  kernel/sched/sched.h |   15 +++++++++++++++
>  2 files changed, 21 insertions(+), 14 deletions(-)
> 
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -3872,21 +3872,13 @@ static void __ttwu_queue_wakelist(struct
>  void wake_up_if_idle(int cpu)
>  {
>  	struct rq *rq = cpu_rq(cpu);
> -	struct rq_flags rf;
>  
> -	rcu_read_lock();
> -
> -	if (!is_idle_task(rcu_dereference(rq->curr)))
> -		goto out;
> -
> -	rq_lock_irqsave(rq, &rf);
> -	if (is_idle_task(rq->curr))
> -		resched_curr(rq);
> -	/* Else CPU is not idle, do nothing here: */
> -	rq_unlock_irqrestore(rq, &rf);
> -
> -out:
> -	rcu_read_unlock();
> +	guard(rcu)();
> +	if (is_idle_task(rcu_dereference(rq->curr))) {
> +		guard(rq_lock)(rq);

Do we assume that irqs must be disabled when this function is called?
Otherwise, I don't understand why this is not

	guard(rq_lock_irqsave)(rq);

?

Regards,
Boqun

> +		if (is_idle_task(rq->curr))
> +			resched_curr(rq);
> +	}
>  }
>  
>  bool cpus_share_cache(int this_cpu, int that_cpu)
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -1678,6 +1678,21 @@ rq_unlock(struct rq *rq, struct rq_flags
>  	raw_spin_rq_unlock(rq);
>  }
>  
> +DEFINE_LOCK_GUARD_1(rq_lock, struct rq,
> +		    rq_lock(_T->lock, &_T->rf),
> +		    rq_unlock(_T->lock, &_T->rf),
> +		    struct rq_flags rf)
> +
> +DEFINE_LOCK_GUARD_1(rq_lock_irq, struct rq,
> +		    rq_lock_irq(_T->lock, &_T->rf),
> +		    rq_unlock_irq(_T->lock, &_T->rf),
> +		    struct rq_flags rf)
> +
> +DEFINE_LOCK_GUARD_1(rq_lock_irqsave, struct rq,
> +		    rq_lock_irqsave(_T->lock, &_T->rf),
> +		    rq_unlock_irqrestore(_T->lock, &_T->rf),
> +		    struct rq_flags rf)
> +
>  static inline struct rq *
>  this_rq_lock_irq(struct rq_flags *rf)
>  	__acquires(rq->lock)
> 
>
Peter Zijlstra June 12, 2023, 8:02 p.m. UTC | #2
On Mon, Jun 12, 2023 at 11:02:28AM -0700, Boqun Feng wrote:
> On Mon, Jun 12, 2023 at 11:07:21AM +0200, Peter Zijlstra wrote:
> > Use guards to reduce gotos and simplify control flow.
> > 
> > Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> > ---
> >  kernel/sched/core.c  |   20 ++++++--------------
> >  kernel/sched/sched.h |   15 +++++++++++++++
> >  2 files changed, 21 insertions(+), 14 deletions(-)
> > 
> > --- a/kernel/sched/core.c
> > +++ b/kernel/sched/core.c
> > @@ -3872,21 +3872,13 @@ static void __ttwu_queue_wakelist(struct
> >  void wake_up_if_idle(int cpu)
> >  {
> >  	struct rq *rq = cpu_rq(cpu);
> > -	struct rq_flags rf;
> >  
> > -	rcu_read_lock();
> > -
> > -	if (!is_idle_task(rcu_dereference(rq->curr)))
> > -		goto out;
> > -
> > -	rq_lock_irqsave(rq, &rf);
> > -	if (is_idle_task(rq->curr))
> > -		resched_curr(rq);
> > -	/* Else CPU is not idle, do nothing here: */
> > -	rq_unlock_irqrestore(rq, &rf);
> > -
> > -out:
> > -	rcu_read_unlock();
> > +	guard(rcu)();
> > +	if (is_idle_task(rcu_dereference(rq->curr))) {
> > +		guard(rq_lock)(rq);
> 
> Do we assume that irqs must be disabled when this function is called?
> Otherwise, I don't understand why this is not
> 
> 	guard(rq_lock_irqsave)(rq);
> 

You're quite right, I messed that up.
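
[Editor's note: the thread implies the hunk should use the irqsave guard, as
Boqun suggested. A sketch of the implied fix, not quoted from a later
revision of the series:

	guard(rcu)();
	if (is_idle_task(rcu_dereference(rq->curr))) {
		guard(rq_lock_irqsave)(rq);	/* irqsave, as in the old code */
		if (is_idle_task(rq->curr))
			resched_curr(rq);
	}
]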

Patch

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3872,21 +3872,13 @@ static void __ttwu_queue_wakelist(struct
 void wake_up_if_idle(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	struct rq_flags rf;
 
-	rcu_read_lock();
-
-	if (!is_idle_task(rcu_dereference(rq->curr)))
-		goto out;
-
-	rq_lock_irqsave(rq, &rf);
-	if (is_idle_task(rq->curr))
-		resched_curr(rq);
-	/* Else CPU is not idle, do nothing here: */
-	rq_unlock_irqrestore(rq, &rf);
-
-out:
-	rcu_read_unlock();
+	guard(rcu)();
+	if (is_idle_task(rcu_dereference(rq->curr))) {
+		guard(rq_lock)(rq);
+		if (is_idle_task(rq->curr))
+			resched_curr(rq);
+	}
 }
 
 bool cpus_share_cache(int this_cpu, int that_cpu)
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1678,6 +1678,21 @@ rq_unlock(struct rq *rq, struct rq_flags
 	raw_spin_rq_unlock(rq);
 }
 
+DEFINE_LOCK_GUARD_1(rq_lock, struct rq,
+		    rq_lock(_T->lock, &_T->rf),
+		    rq_unlock(_T->lock, &_T->rf),
+		    struct rq_flags rf)
+
+DEFINE_LOCK_GUARD_1(rq_lock_irq, struct rq,
+		    rq_lock_irq(_T->lock, &_T->rf),
+		    rq_unlock_irq(_T->lock, &_T->rf),
+		    struct rq_flags rf)
+
+DEFINE_LOCK_GUARD_1(rq_lock_irqsave, struct rq,
+		    rq_lock_irqsave(_T->lock, &_T->rf),
+		    rq_unlock_irqrestore(_T->lock, &_T->rf),
+		    struct rq_flags rf)
+
 static inline struct rq *
 this_rq_lock_irq(struct rq_flags *rf)
 	__acquires(rq->lock)
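
[Editor's note: DEFINE_LOCK_GUARD_1(name, type, lock-expr, unlock-expr,
extra-member) generates a small guard class holding the lock pointer plus the
extra member (here the struct rq_flags that rq_lock()/rq_unlock() need),
wired to the compiler's cleanup attribute. A simplified sketch of what the
rq_lock guard expands to; the real macros in include/linux/cleanup.h carry
more plumbing than shown:

typedef struct {
	struct rq *lock;	/* _T->lock in the macro arguments */
	struct rq_flags rf;	/* the trailing 'struct rq_flags rf' member */
} class_rq_lock_t;

static inline class_rq_lock_t class_rq_lock_constructor(struct rq *l)
{
	class_rq_lock_t _t = { .lock = l };

	rq_lock(_t.lock, &_t.rf);		/* the lock expression */
	return _t;
}

static inline void class_rq_lock_destructor(class_rq_lock_t *_T)
{
	rq_unlock(_T->lock, &_T->rf);		/* the unlock expression */
}

/*
 * guard(rq_lock)(rq) then declares an instance annotated with
 * __attribute__((cleanup(class_rq_lock_destructor))), so the unlock
 * runs automatically at scope exit.
 */
]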