[v5,net,3/3] bpf: report RCU QS in cpumap kthread

Message ID c17b9f1517e19d813da3ede5ed33ee18496bb5d8.1710877680.git.yan@cloudflare.com (mailing list archive)
State Accepted
Commit 00bf63122459e87193ee7f1bc6161c83a525569f
Series Report RCU QS for busy network kthreads

Commit Message

Yan Zhai March 19, 2024, 8:44 p.m. UTC
Under heavy load, cpumap kernel threads can stay busy polling packets
from redirect queues and prevent RCU tasks from reaching quiescent
states. Just calling cond_resched() in such a context is insufficient.
Periodically raising a consolidated RCU QS before cond_resched() fixes
the problem.

Fixes: 6710e1126934 ("bpf: introduce new bpf cpu map type BPF_MAP_TYPE_CPUMAP")
Reviewed-by: Jesper Dangaard Brouer <hawk@kernel.org>
Signed-off-by: Yan Zhai <yan@cloudflare.com>
---
 kernel/bpf/cpumap.c | 3 +++
 1 file changed, 3 insertions(+)
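
For reference, rcu_softirq_qs_periodic() used below is the helper added
earlier in this series (patch 1/3). Its definition in
include/linux/rcupdate.h is roughly the following sketch, reproduced
from memory; treat patch 1/3 as the authoritative source:

/* Report a consolidated RCU quiescent state at most once every HZ/10
 * jiffies (i.e. every 100ms).  Skipped on PREEMPT_RT, and BH is
 * disabled around rcu_softirq_qs(), which must not run with BH
 * enabled. */
#define rcu_softirq_qs_periodic(old_ts)				\
do {								\
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&			\
	    time_after(jiffies, (old_ts) + HZ / 10)) {		\
		(old_ts) = jiffies;				\
		local_bh_disable();				\
		rcu_softirq_qs();				\
		local_bh_enable();				\
	}							\
} while (0)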

Comments

Paul E. McKenney March 19, 2024, 9:32 p.m. UTC | #1
On Tue, Mar 19, 2024 at 01:44:40PM -0700, Yan Zhai wrote:
> Under heavy load, cpumap kernel threads can stay busy polling packets
> from redirect queues and prevent RCU tasks from reaching quiescent
> states. Just calling cond_resched() in such a context is insufficient.
> Periodically raising a consolidated RCU QS before cond_resched() fixes
> the problem.
> 
> Fixes: 6710e1126934 ("bpf: introduce new bpf cpu map type BPF_MAP_TYPE_CPUMAP")
> Reviewed-by: Jesper Dangaard Brouer <hawk@kernel.org>
> Signed-off-by: Yan Zhai <yan@cloudflare.com>

Acked-by: Paul E. McKenney <paulmck@kernel.org>

> ---
>  kernel/bpf/cpumap.c | 3 +++
>  1 file changed, 3 insertions(+)
> 
> diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
> index 9ee8da477465..a8e34416e960 100644
> --- a/kernel/bpf/cpumap.c
> +++ b/kernel/bpf/cpumap.c
> @@ -263,6 +263,7 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
>  static int cpu_map_kthread_run(void *data)
>  {
>  	struct bpf_cpu_map_entry *rcpu = data;
> +	unsigned long last_qs = jiffies;
>  
>  	complete(&rcpu->kthread_running);
>  	set_current_state(TASK_INTERRUPTIBLE);
> @@ -288,10 +289,12 @@ static int cpu_map_kthread_run(void *data)
>  			if (__ptr_ring_empty(rcpu->queue)) {
>  				schedule();
>  				sched = 1;
> +				last_qs = jiffies;
>  			} else {
>  				__set_current_state(TASK_RUNNING);
>  			}
>  		} else {
> +			rcu_softirq_qs_periodic(last_qs);
>  			sched = cond_resched();
>  		}
>  
> -- 
> 2.30.2
> 
>

Patch

diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 9ee8da477465..a8e34416e960 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -263,6 +263,7 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
 static int cpu_map_kthread_run(void *data)
 {
 	struct bpf_cpu_map_entry *rcpu = data;
+	unsigned long last_qs = jiffies;
 
 	complete(&rcpu->kthread_running);
 	set_current_state(TASK_INTERRUPTIBLE);
@@ -288,10 +289,12 @@ static int cpu_map_kthread_run(void *data)
 			if (__ptr_ring_empty(rcpu->queue)) {
 				schedule();
 				sched = 1;
+				last_qs = jiffies;
 			} else {
 				__set_current_state(TASK_RUNNING);
 			}
 		} else {
+			rcu_softirq_qs_periodic(last_qs);
 			sched = cond_resched();
 		}
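
Two placement details are worth noting. last_qs is reset after
schedule() because sleeping already lets RCU observe a quiescent state,
so the periodic window only needs to cover the busy-polling path. And
the explicit QS is needed because, as the commit message notes,
cond_resched() alone is insufficient here. A minimal sketch of the
resulting pattern for a busy-polling kthread follows; queue_empty() and
process_batch() are hypothetical placeholders, not cpumap functions:

static int busy_poll_kthread(void *data)
{
	unsigned long last_qs = jiffies;

	while (!kthread_should_stop()) {
		if (queue_empty()) {
			schedule();		/* sleeping is itself a QS */
			last_qs = jiffies;	/* restart the periodic window */
		} else {
			/* raise a QS at most once every HZ/10 jiffies */
			rcu_softirq_qs_periodic(last_qs);
			cond_resched();
		}
		process_batch();	/* may run for long stretches under load */
	}
	return 0;
}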