
[4/9] softirq: Allow early break

Message ID	20230505113315.3307723-5-liujian56@huawei.com
State	New, archived
Series	fix softlockup in run_timer_softirq

Commit Message

liujian (CE) May 5, 2023, 11:33 a.m. UTC
From: Peter Zijlstra <peterz@infradead.org>

Allow terminating the softirq processing loop without finishing the
vectors.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Liu Jian <liujian56@huawei.com>
---
 kernel/softirq.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
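
The helper __softirq_needs_break() used below is introduced by an earlier patch
in this series and is not shown here. As a rough sketch only (the time budget,
the exact checks, and the assumption that 'start' is a sched_clock() timestamp
taken at entry are guesses, not taken from the series), such a break condition
could look like:

	static inline bool __softirq_needs_break(u64 start)
	{
		/* Assumed time budget for one softirq processing pass. */
		if (sched_clock() - start >= 2 * NSEC_PER_MSEC)
			return true;

		/* Give the CPU back if a task is waiting to run. */
		if (need_resched())
			return true;

		return false;
	}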

Comments

Eric Dumazet May 5, 2023, 12:10 p.m. UTC | #1
On Fri, May 5, 2023 at 1:24 PM Liu Jian <liujian56@huawei.com> wrote:
>
> From: Peter Zijlstra <peterz@infradead.org>
>
> Allow terminating the softirq processing loop without finishing the
> vectors.
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> Signed-off-by: Liu Jian <liujian56@huawei.com>
> ---
>  kernel/softirq.c | 16 ++++++++++------
>  1 file changed, 10 insertions(+), 6 deletions(-)
>
> diff --git a/kernel/softirq.c b/kernel/softirq.c
> index 48a81d8ae49a..e2cad5d108c8 100644
> --- a/kernel/softirq.c
> +++ b/kernel/softirq.c
> @@ -582,6 +582,9 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
>                                prev_count, preempt_count());
>                         preempt_count_set(prev_count);
>                 }
> +
> +               if (pending && __softirq_needs_break(start))
> +                       break;

This means that some softirqs may starve, because

for_each_set_bit(vec_nr, &pending, NR_SOFTIRQS)

always iterates the bits in the same order.
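
For reference, the softirq vectors are numbered as below (the enum from
include/linux/interrupt.h, shown here for context). for_each_set_bit() always
walks from the lowest set bit upward, so an early break repeatedly favors the
low-numbered vectors while the high-numbered ones, RCU_SOFTIRQ in particular,
keep getting pushed to a later pass:

	enum
	{
		HI_SOFTIRQ=0,
		TIMER_SOFTIRQ,
		NET_TX_SOFTIRQ,
		NET_RX_SOFTIRQ,
		BLOCK_SOFTIRQ,
		IRQ_POLL_SOFTIRQ,
		TASKLET_SOFTIRQ,
		SCHED_SOFTIRQ,
		HRTIMER_SOFTIRQ,
		RCU_SOFTIRQ,	/* Preferable RCU should always be the last softirq */

		NR_SOFTIRQS
	};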




>         }
>
>         if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
> @@ -590,13 +593,14 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
>
>         local_irq_disable();
>
> -       pending = local_softirq_pending();
> -       if (pending) {
> -               if (!__softirq_needs_break(start) && --max_restart)
> -                       goto restart;
> +       if (pending)
> +               or_softirq_pending(pending);
> +       else if ((pending = local_softirq_pending()) &&
> +                !__softirq_needs_break(start) &&
> +                --max_restart)
> +               goto restart;
>
> -               wakeup_softirqd();
> -       }
> +       wakeup_softirqd();
>
>         account_softirq_exit(current);
>         lockdep_softirq_end(in_hardirq);
> --
> 2.34.1
>
liujian (CE) May 8, 2023, 3:37 a.m. UTC | #2
On 2023/5/5 20:10, Eric Dumazet wrote:
> On Fri, May 5, 2023 at 1:24 PM Liu Jian <liujian56@huawei.com> wrote:
>>
>> From: Peter Zijlstra <peterz@infradead.org>
>>
>> Allow terminating the softirq processing loop without finishing the
>> vectors.
>>
>> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
>> Signed-off-by: Liu Jian <liujian56@huawei.com>
>> ---
>>   kernel/softirq.c | 16 ++++++++++------
>>   1 file changed, 10 insertions(+), 6 deletions(-)
>>
>> diff --git a/kernel/softirq.c b/kernel/softirq.c
>> index 48a81d8ae49a..e2cad5d108c8 100644
>> --- a/kernel/softirq.c
>> +++ b/kernel/softirq.c
>> @@ -582,6 +582,9 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
>>                                 prev_count, preempt_count());
>>                          preempt_count_set(prev_count);
>>                  }
>> +
>> +               if (pending && __softirq_needs_break(start))
>> +                       break;
> 
> This means that some softirqs may starve, because
> 
> for_each_set_bit(vec_nr, &pending, NR_SOFTIRQS)
> 
> always iterates the bits in the same order.
> 
> 
Yes, we need to record the softirqs that were not executed and continue
with them next time. I will fix it in the next version.
Thanks.
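
One purely illustrative way to do that (a sketch of the idea, not the actual
follow-up patch; the shape of the loop and the 'next_vec' bookkeeping are
assumptions) is to resume the walk at the vector where the previous pass broke
off instead of always starting again at bit 0:

	unsigned int i, vec_nr, next_vec = 0;
	unsigned long pending;

restart:
	pending = local_softirq_pending();
	set_softirq_pending(0);

	for (i = 0; i < NR_SOFTIRQS; i++) {
		/* Resume where the last pass stopped, then wrap around. */
		vec_nr = (next_vec + i) % NR_SOFTIRQS;
		if (!test_bit(vec_nr, &pending))
			continue;
		__clear_bit(vec_nr, &pending);

		/* ... invoke softirq_vec[vec_nr] here ... */

		if (__softirq_needs_break(start)) {
			/* The unrun vectors stay in 'pending'. */
			next_vec = vec_nr + 1;
			break;
		}
	}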
> 
> 
>>          }
>>
>>          if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
>> @@ -590,13 +593,14 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
>>
>>          local_irq_disable();
>>
>> -       pending = local_softirq_pending();
>> -       if (pending) {
>> -               if (!__softirq_needs_break(start) && --max_restart)
>> -                       goto restart;
>> +       if (pending)
>> +               or_softirq_pending(pending);
>> +       else if ((pending = local_softirq_pending()) &&
>> +                !__softirq_needs_break(start) &&
>> +                --max_restart)
>> +               goto restart;
>>
>> -               wakeup_softirqd();
>> -       }
>> +       wakeup_softirqd();
>>
>>          account_softirq_exit(current);
>>          lockdep_softirq_end(in_hardirq);
>> --
>> 2.34.1
>>

Patch

diff --git a/kernel/softirq.c b/kernel/softirq.c
index 48a81d8ae49a..e2cad5d108c8 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -582,6 +582,9 @@  asmlinkage __visible void __softirq_entry __do_softirq(void)
 			       prev_count, preempt_count());
 			preempt_count_set(prev_count);
 		}
+
+		if (pending && __softirq_needs_break(start))
+			break;
 	}
 
 	if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
@@ -590,13 +593,14 @@  asmlinkage __visible void __softirq_entry __do_softirq(void)
 
 	local_irq_disable();
 
-	pending = local_softirq_pending();
-	if (pending) {
-		if (!__softirq_needs_break(start) && --max_restart)
-			goto restart;
+	if (pending)
+		or_softirq_pending(pending);
+	else if ((pending = local_softirq_pending()) &&
+		 !__softirq_needs_break(start) &&
+		 --max_restart)
+		goto restart;
 
-		wakeup_softirqd();
-	}
+	wakeup_softirqd();
 
 	account_softirq_exit(current);
 	lockdep_softirq_end(in_hardirq);
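
Taken together, the exit path after this patch behaves roughly as follows
(a restatement of the diff above with explanatory comments, not verbatim
kernel source):

	local_irq_disable();

	if (pending) {
		/*
		 * We broke out of the vector loop early: put the unhandled
		 * vectors back into the pending mask so they are not lost.
		 */
		or_softirq_pending(pending);
	} else if ((pending = local_softirq_pending()) &&
		   !__softirq_needs_break(start) &&
		   --max_restart) {
		/* New work arrived and there is still budget left: go again. */
		goto restart;
	}

	/* Not restarting: wake ksoftirqd to pick up whatever is pending. */
	wakeup_softirqd();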