@@ -93,11 +93,14 @@ void dynamic_preempt_schedule(void);
#define __preempt_schedule() dynamic_preempt_schedule()
void dynamic_preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() dynamic_preempt_schedule_notrace()
+void dynamic_irqentry_exit_cond_resched(void);
+#define irqentry_exit_cond_resched() dynamic_irqentry_exit_cond_resched()
#else /* CONFIG_PREEMPT_DYNAMIC */
#define __preempt_schedule() preempt_schedule()
#define __preempt_schedule_notrace() preempt_schedule_notrace()
+#define irqentry_exit_cond_resched() raw_irqentry_exit_cond_resched()
#endif /* CONFIG_PREEMPT_DYNAMIC */
#endif /* CONFIG_PREEMPTION */
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -75,10 +75,6 @@ static noinstr irqentry_state_t enter_from_kernel_mode(struct pt_regs *regs)
return state;
}
-#ifdef CONFIG_PREEMPT_DYNAMIC
-DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
-#endif
-
static inline bool arm64_need_resched(void)
{
/*
@@ -106,17 +102,22 @@ static inline bool arm64_need_resched(void)
void raw_irqentry_exit_cond_resched(void)
{
-#ifdef CONFIG_PREEMPT_DYNAMIC
- if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
- return;
-#endif
-
if (!preempt_count()) {
if (need_resched() && arm64_need_resched())
preempt_schedule_irq();
}
}
+#ifdef CONFIG_PREEMPT_DYNAMIC
+DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+void dynamic_irqentry_exit_cond_resched(void)
+{
+ if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
+ return;
+ raw_irqentry_exit_cond_resched();
+}
+#endif
+
/*
* Handle IRQ/context state management when exiting to kernel mode.
* After this function returns it is not safe to call regular kernel code,
@@ -140,7 +141,7 @@ static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs,
}
if (IS_ENABLED(CONFIG_PREEMPTION))
- raw_irqentry_exit_cond_resched();
+ irqentry_exit_cond_resched();
trace_hardirqs_on();
} else {
In the generic entry code, two different helpers are used to check whether a
reschedule is required, depending on whether PREEMPT_DYNAMIC is enabled, and
the common code is shared between the two cases. In preparation for moving
arm64 over to the generic entry code, use the new helper to check for a
reschedule when PREEMPT_DYNAMIC is enabled, and reuse the common code for the
disabled case.

No functional changes.

Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
 arch/arm64/include/asm/preempt.h |  3 +++
 arch/arm64/kernel/entry-common.c | 21 +++++++++++----------
 2 files changed, 14 insertions(+), 10 deletions(-)
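As a side note for readers less familiar with the pattern above, here is a
minimal, self-contained userspace sketch of the same dispatch scheme
(illustrative only; cond_resched_hook, dynamic_cond_resched and
dynamic_resched_enabled are made-up names, not kernel APIs). A compile-time
option either maps the hook straight to the raw helper, or routes it through
a "dynamic" wrapper that consults a runtime flag, standing in for the
sk_dynamic_irqentry_exit_cond_resched static key, before reusing the same
raw helper:

/* Illustrative userspace sketch, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

/* The common ("raw") helper, built unconditionally. */
static void raw_cond_resched(void)
{
	printf("reschedule check runs\n");
}

#ifdef CONFIG_PREEMPT_DYNAMIC
/*
 * Runtime switch; the kernel uses a static key patched at boot
 * (sk_dynamic_irqentry_exit_cond_resched) rather than a plain bool.
 */
static bool dynamic_resched_enabled = true;

static void dynamic_cond_resched(void)
{
	if (!dynamic_resched_enabled)
		return;			/* preemption model says: skip */
	raw_cond_resched();		/* reuse the common code */
}
#define cond_resched_hook()	dynamic_cond_resched()
#else
/* Static configuration: call the raw helper directly. */
#define cond_resched_hook()	raw_cond_resched()
#endif

int main(void)
{
	cond_resched_hook();		/* resolves per build configuration */
	return 0;
}

Building this with and without -DCONFIG_PREEMPT_DYNAMIC shows the two
resolutions of the macro, mirroring how irqentry_exit_cond_resched()
resolves in the preempt.h hunk above.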