diff --git a/arch/Kconfig b/arch/Kconfig
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -1395,41 +1395,11 @@ config HAVE_STATIC_CALL_INLINE
config HAVE_PREEMPT_DYNAMIC
bool
-
-config HAVE_PREEMPT_DYNAMIC_CALL
- bool
depends on HAVE_STATIC_CALL
- select HAVE_PREEMPT_DYNAMIC
help
- An architecture should select this if it can handle the preemption
- model being selected at boot time using static calls.
-
- Where an architecture selects HAVE_STATIC_CALL_INLINE, any call to a
- preemption function will be patched directly.
-
- Where an architecture does not select HAVE_STATIC_CALL_INLINE, any
- call to a preemption function will go through a trampoline, and the
- trampoline will be patched.
-
- It is strongly advised to support inline static call to avoid any
- overhead.
-
-config HAVE_PREEMPT_DYNAMIC_KEY
- bool
- depends on HAVE_ARCH_JUMP_LABEL
- select HAVE_PREEMPT_DYNAMIC
- help
- An architecture should select this if it can handle the preemption
- model being selected at boot time using static keys.
-
- Each preemption function will be given an early return based on a
- static key. This should have slightly lower overhead than non-inline
- static calls, as this effectively inlines each trampoline into the
- start of its callee. This may avoid redundant work, and may
- integrate better with CFI schemes.
-
- This will have greater overhead than using inline static calls as
- the call to the preemption function cannot be entirely elided.
+ Select this if the architecture supports boot-time selection of the
+ preemption model on top of static calls. It is strongly advised to
+ support inline static calls to avoid any overhead.
config ARCH_WANT_LD_ORPHAN_WARN
bool
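
The help text deleted above distinguishes the two static-call flavours: with HAVE_STATIC_CALL_INLINE every call site is patched directly, otherwise each call goes through one patched trampoline. As a rough illustration only (not part of this patch), here is a userspace C sketch of the trampoline form, with a function pointer standing in for the machine-code patching and all names invented:

#include <stdio.h>

/* the two possible targets the kernel would patch between */
static void preempt_enabled_impl(void)  { puts("preemption path"); }
static void preempt_disabled_impl(void) { puts("no-op path"); }

/* The "trampoline": every call site jumps through this one hop, and
 * boot-time "patching" retargets it. A pointer models the patched jump;
 * a real static call rewrites the instruction and has no pointer load. */
static void (*preempt_target)(void) = preempt_enabled_impl;
static void preempt_schedule_tramp(void) { preempt_target(); }

int main(void)
{
	preempt_schedule_tramp();               /* default model */
	preempt_target = preempt_disabled_impl; /* chosen once at boot */
	preempt_schedule_tramp();
	return 0;
}

With inline static calls the extra hop disappears entirely, which is why the help text advises supporting them.
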
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -270,7 +270,7 @@ config X86
select HAVE_STACK_VALIDATION if HAVE_OBJTOOL
select HAVE_STATIC_CALL
select HAVE_STATIC_CALL_INLINE if HAVE_OBJTOOL
- select HAVE_PREEMPT_DYNAMIC_CALL
+ select HAVE_PREEMPT_DYNAMIC
select HAVE_RSEQ
select HAVE_RUST if X86_64
select HAVE_SYSCALL_TRACEPOINTS
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -416,19 +416,13 @@ irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
*/
void raw_irqentry_exit_cond_resched(void);
#ifdef CONFIG_PREEMPT_DYNAMIC
-#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#define irqentry_exit_cond_resched_dynamic_enabled raw_irqentry_exit_cond_resched
#define irqentry_exit_cond_resched_dynamic_disabled NULL
DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
#define irqentry_exit_cond_resched() static_call(irqentry_exit_cond_resched)()
-#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
-void dynamic_irqentry_exit_cond_resched(void);
-#define irqentry_exit_cond_resched() dynamic_irqentry_exit_cond_resched()
-#endif
-#else /* CONFIG_PREEMPT_DYNAMIC */
+#else
#define irqentry_exit_cond_resched() raw_irqentry_exit_cond_resched()
-#endif /* CONFIG_PREEMPT_DYNAMIC */
+#endif
/**
* irqentry_exit - Handle return from exception that used irqentry_enter()
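
The _dynamic_enabled/_dynamic_disabled pair kept above follows the static-call convention used later by preempt_dynamic_enable()/preempt_dynamic_disable(): retargeting the call to the disabled target, here NULL, patches the call site into a no-op. A minimal userspace model of that behaviour (illustrative only, not part of this patch; the kernel patches the call site itself and performs no NULL check at runtime):

#include <stddef.h>
#include <stdio.h>

static void raw_resched_sim(void) { puts("raw_irqentry_exit_cond_resched"); }

/* enabled target is the real function, disabled target is NULL */
static void (*irqentry_hook)(void) = raw_resched_sim;

static void irqentry_exit_cond_resched_sim(void)
{
	if (irqentry_hook)	/* models the patched-out (NULL) static call */
		irqentry_hook();
}

int main(void)
{
	irqentry_exit_cond_resched_sim();	/* preempt=full: calls through */
	irqentry_hook = NULL;			/* preempt=none disables it */
	irqentry_exit_cond_resched_sim();	/* now a no-op */
	return 0;
}
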
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -101,7 +101,7 @@ struct user;
extern int __cond_resched(void);
# define might_resched() __cond_resched()
-#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+#elif defined(CONFIG_PREEMPT_DYNAMIC)
extern int __cond_resched(void);
@@ -112,11 +112,6 @@ static __always_inline void might_resched(void)
static_call_mod(might_resched)();
}
-#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-
-extern int dynamic_might_resched(void);
-# define might_resched() dynamic_might_resched()
-
#else
# define might_resched() do { } while (0)
diff --git a/include/linux/sched.h b/include/linux/sched.h
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2084,7 +2084,7 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
extern int __cond_resched(void);
-#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+#ifdef CONFIG_PREEMPT_DYNAMIC
DECLARE_STATIC_CALL(cond_resched, __cond_resched);
@@ -2093,14 +2093,6 @@ static __always_inline int _cond_resched(void)
return static_call_mod(cond_resched)();
}
-#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-extern int dynamic_cond_resched(void);
-
-static __always_inline int _cond_resched(void)
-{
- return dynamic_cond_resched();
-}
-
#else
static inline int _cond_resched(void)
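
This hunk and the kernel.h one above keep the static-call form of cond_resched()/might_resched(). Their disabled target is __static_call_return0 rather than NULL because callers consume the return value: under preempt=full the patched call simply returns 0. A compilable stand-in (not part of this patch; the _sim names are invented and a function pointer replaces the patched call):

#include <stdio.h>

static int __cond_resched_sim(void) { puts("reschedule point"); return 1; }
static int return0_sim(void) { return 0; }  /* models __static_call_return0 */

static int (*cond_resched_hook)(void) = __cond_resched_sim;

int main(void)
{
	printf("-> %d\n", cond_resched_hook()); /* preempt=voluntary: real call */
	cond_resched_hook = return0_sim;        /* preempt=full: becomes return 0 */
	printf("-> %d\n", cond_resched_hook());
	return 0;
}
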
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -96,9 +96,8 @@ config PREEMPTION
config PREEMPT_DYNAMIC
bool "Preemption behaviour defined on boot"
depends on HAVE_PREEMPT_DYNAMIC && !PREEMPT_RT
- select JUMP_LABEL if HAVE_PREEMPT_DYNAMIC_KEY
select PREEMPT_BUILD
- default y if HAVE_PREEMPT_DYNAMIC_CALL
+ default y
help
This option allows to define the preemption model on the kernel
command line parameter and thus override the default preemption
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -4,7 +4,6 @@
#include <linux/entry-common.h>
#include <linux/resume_user_mode.h>
#include <linux/highmem.h>
-#include <linux/jump_label.h>
#include <linux/kmsan.h>
#include <linux/livepatch.h>
#include <linux/audit.h>
@@ -390,17 +389,7 @@ void raw_irqentry_exit_cond_resched(void)
}
}
#ifdef CONFIG_PREEMPT_DYNAMIC
-#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
-#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
-void dynamic_irqentry_exit_cond_resched(void)
-{
- if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
- return;
- raw_irqentry_exit_cond_resched();
-}
-#endif
#endif
noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
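
For contrast, the code deleted above is the static-key flavour: the wrapper's first instruction is a jump-label-guarded early return, so disabling a preemption point costs one patched branch instead of a retargeted call. A minimal userspace sketch of that shape (not part of this patch; a plain bool stands in for the jump label):

#include <stdbool.h>
#include <stdio.h>

static bool sk_dynamic_sim = true;	/* DEFINE_STATIC_KEY_TRUE() analogue */

static void raw_op_sim(void) { puts("raw_irqentry_exit_cond_resched"); }

static void dynamic_op_sim(void)
{
	if (!sk_dynamic_sim)	/* static_branch_unlikely() in the kernel */
		return;		/* key disabled: early return, no call */
	raw_op_sim();
}

int main(void)
{
	dynamic_op_sim();		/* key enabled: falls through */
	sk_dynamic_sim = false;		/* static_key_disable() analogue */
	dynamic_op_sim();		/* returns immediately */
	return 0;
}
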
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6885,32 +6885,22 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
*/
if (likely(!preemptible()))
return;
+
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
EXPORT_SYMBOL(preempt_schedule);
#ifdef CONFIG_PREEMPT_DYNAMIC
-#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#ifndef preempt_schedule_dynamic_enabled
#define preempt_schedule_dynamic_enabled preempt_schedule
#define preempt_schedule_dynamic_disabled NULL
#endif
DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
-#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
-void __sched notrace dynamic_preempt_schedule(void)
-{
- if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
- return;
- preempt_schedule();
-}
-NOKPROBE_SYMBOL(dynamic_preempt_schedule);
-EXPORT_SYMBOL(dynamic_preempt_schedule);
-#endif
#endif
+
/**
* preempt_schedule_notrace - preempt_schedule called by tracing
*
@@ -6964,24 +6954,12 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
#ifdef CONFIG_PREEMPT_DYNAMIC
-#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#ifndef preempt_schedule_notrace_dynamic_enabled
#define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace
#define preempt_schedule_notrace_dynamic_disabled NULL
#endif
DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
-#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
-void __sched notrace dynamic_preempt_schedule_notrace(void)
-{
- if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
- return;
- preempt_schedule_notrace();
-}
-NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
-EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
-#endif
#endif
#endif /* CONFIG_PREEMPTION */
@@ -8583,7 +8561,6 @@ EXPORT_SYMBOL(__cond_resched);
#endif
#ifdef CONFIG_PREEMPT_DYNAMIC
-#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#define cond_resched_dynamic_enabled __cond_resched
#define cond_resched_dynamic_disabled ((void *)&__static_call_return0)
DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
@@ -8593,25 +8570,6 @@ EXPORT_STATIC_CALL_TRAMP(cond_resched);
#define might_resched_dynamic_disabled ((void *)&__static_call_return0)
DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
EXPORT_STATIC_CALL_TRAMP(might_resched);
-#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
-int __sched dynamic_cond_resched(void)
-{
- if (!static_branch_unlikely(&sk_dynamic_cond_resched))
- return 0;
- return __cond_resched();
-}
-EXPORT_SYMBOL(dynamic_cond_resched);
-
-static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
-int __sched dynamic_might_resched(void)
-{
- if (!static_branch_unlikely(&sk_dynamic_might_resched))
- return 0;
- return __cond_resched();
-}
-EXPORT_SYMBOL(dynamic_might_resched);
-#endif
#endif
/*
@@ -8735,15 +8693,8 @@ int sched_dynamic_mode(const char *str)
return -EINVAL;
}
-#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled)
#define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled)
-#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-#define preempt_dynamic_enable(f) static_key_enable(&sk_dynamic_##f.key)
-#define preempt_dynamic_disable(f) static_key_disable(&sk_dynamic_##f.key)
-#else
-#error "Unsupported PREEMPT_DYNAMIC mechanism"
-#endif
void sched_dynamic_update(int mode)
{
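
The surviving preempt_dynamic_enable()/preempt_dynamic_disable() macros are what sched_dynamic_update() uses to repoint every dynamic operation when a model is picked, via the preempt= boot parameter or, on kernels that expose it, the /sys/kernel/debug/sched/preempt debugfs knob. A simplified userspace sketch of that switch (not part of this patch; it covers only two of the five operations, folds none/voluntary together, and uses function pointers in place of static_call_update()):

#include <stdio.h>
#include <string.h>

static int  cond_resched_sim_enabled(void)  { puts("cond_resched"); return 1; }
static int  cond_resched_sim_disabled(void) { return 0; }
static void preempt_schedule_sim_enabled(void)  { puts("preempt_schedule"); }
static void preempt_schedule_sim_disabled(void) { }

static int  (*cond_resched_sim)(void)     = cond_resched_sim_enabled;
static void (*preempt_schedule_sim)(void) = preempt_schedule_sim_disabled;

/* models preempt_dynamic_enable()/preempt_dynamic_disable() */
#define dyn_enable(f)	(f##_sim = f##_sim_enabled)
#define dyn_disable(f)	(f##_sim = f##_sim_disabled)

static int sched_dynamic_update_sim(const char *mode)
{
	if (!strcmp(mode, "none") || !strcmp(mode, "voluntary")) {
		dyn_enable(cond_resched);	/* explicit points stay live */
		dyn_disable(preempt_schedule);
	} else if (!strcmp(mode, "full")) {
		dyn_disable(cond_resched);	/* becomes return 0 */
		dyn_enable(preempt_schedule);
	} else {
		return -1;	/* sched_dynamic_mode() would return -EINVAL */
	}
	return 0;
}

int main(void)
{
	sched_dynamic_update_sim("full");	/* as preempt=full does at boot */
	cond_resched_sim();			/* patched to plain return 0 */
	preempt_schedule_sim();			/* prints: preempt_schedule */
	return 0;
}
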
This reverts commit 99cf983cc8bca4adb461b519664c939a565cfd4d.

Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
 arch/Kconfig                 | 36 ++----------------------
 arch/x86/Kconfig             |  2 +-
 include/linux/entry-common.h | 10 ++-----
 include/linux/kernel.h       |  7 +----
 include/linux/sched.h        | 10 +------
 kernel/Kconfig.preempt       |  3 +-
 kernel/entry/common.c        | 11 --------
 kernel/sched/core.c          | 53 ++----------------------------------
 8 files changed, 11 insertions(+), 121 deletions(-)