
[V2,07/11] sched/core: Add rcu_preempt_switch()

Message ID: 20240407090558.3395-8-jiangshanlai@gmail.com
State: New
Series: rcu/x86: Use per-cpu rcu preempt count

Commit Message

Lai Jiangshan April 7, 2024, 9:05 a.m. UTC
From: Lai Jiangshan <jiangshan.ljs@antgroup.com>

When the per-cpu rcu_preempt_count is used, it must also be switched
at context switch.  Since switching the per-cpu rcu_preempt_count
takes only a few instructions, rcu_preempt_switch() is inlined to
avoid call overhead in the scheduler.

Cc: "Paul E. McKenney" <paulmck@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
---
 kernel/rcu/rcu.h    | 5 +++++
 kernel/sched/core.c | 2 ++
 2 files changed, 7 insertions(+)
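
The rcu_preempt_switch() added here is an empty stub; the per-cpu
rcu_preempt_count it is meant to switch is introduced elsewhere in the
series.  Below is a minimal sketch of what the eventual switch could
look like, assuming a hypothetical per-task rcu_preempt_count field and
hypothetical pcpu_rcu_preempt_count()/pcpu_rcu_preempt_count_set()
helpers that read and write the per-CPU counter; none of these names
come from this patch.

	/*
	 * Hedged sketch, not code from this series: park the outgoing
	 * task's per-CPU RCU preempt count in its task_struct, then
	 * install the incoming task's saved count.
	 */
	static inline void
	rcu_preempt_switch(struct task_struct *prev, struct task_struct *next)
	{
		prev->rcu_preempt_count = pcpu_rcu_preempt_count();
		pcpu_rcu_preempt_count_set(next->rcu_preempt_count);
	}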

Patch

diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index b17b2ed657fc..ea5ae957c687 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -668,4 +668,9 @@ int rcu_stall_notifier_call_chain(unsigned long val, void *v);
 static inline int rcu_stall_notifier_call_chain(unsigned long val, void *v) { return NOTIFY_DONE; }
 #endif // #else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
 
+static inline void
+rcu_preempt_switch(struct task_struct *prev, struct task_struct *next)
+{
+}
+
 #endif /* __KERNEL_RCU_H */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7019a40457a6..1d9e3c51c913 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -95,6 +95,7 @@
 #include "../workqueue_internal.h"
 #include "../../io_uring/io-wq.h"
 #include "../smpboot.h"
+#include "../rcu/rcu.h"
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
@@ -6737,6 +6738,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
 		 */
 		++*switch_count;
 
+		rcu_preempt_switch(prev, next);
 		migrate_disable_switch(rq, prev);
 		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
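
Because the stub is an empty static inline, existing configurations pay
nothing for the new call site: the compiler drops it entirely.  One
plausible way the series could later substitute a real implementation,
sketched here with a hypothetical CONFIG_PCPU_RCU_PREEMPT_COUNT option
and <asm/rcu_preempt.h> header (both names are assumptions, not taken
from this patch):

	/* Hypothetical layering around the stub in kernel/rcu/rcu.h: */
	#ifdef CONFIG_PCPU_RCU_PREEMPT_COUNT
	/* Architecture code supplies the real, inlined switch. */
	#include <asm/rcu_preempt.h>
	#else
	static inline void
	rcu_preempt_switch(struct task_struct *prev, struct task_struct *next)
	{
	}
	#endif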