
[RFC,52/86] sched: remove CONFIG_PREEMPTION from *_needbreak()

Message ID 20231107215742.363031-53-ankur.a.arora@oracle.com (mailing list archive)
State New
Series Make the kernel preemptible

Commit Message

Ankur Arora Nov. 7, 2023, 9:57 p.m. UTC
Since CONFIG_PREEMPTION is always enabled, we can remove the #ifdef clutter
from spin_needbreak() and rwlock_needbreak().

Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
 include/linux/sched.h | 15 +++------------
 1 file changed, 3 insertions(+), 12 deletions(-)

Patch

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4dabd9530f98..6ba4371761c4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2146,16 +2146,13 @@  static inline void cond_resched_rcu(void)
 
 /*
  * Does a critical section need to be broken due to another
- * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
- * but a general need for low latency)
+ * task waiting?: this should really depend on whether we have
+ * sched_feat(FORCE_PREEMPT) or not but that is not visible
+ * outside the scheduler.
  */
 static inline int spin_needbreak(spinlock_t *lock)
 {
-#ifdef CONFIG_PREEMPTION
 	return spin_is_contended(lock);
-#else
-	return 0;
-#endif
 }
 
 /*
@@ -2163,16 +2160,10 @@  static inline int spin_needbreak(spinlock_t *lock)
  * Returns non-zero if there is another task waiting on the rwlock.
  * Returns zero if the lock is not contended or the system / underlying
  * rwlock implementation does not support contention detection.
- * Technically does not depend on CONFIG_PREEMPTION, but a general need
- * for low latency.
  */
 static inline int rwlock_needbreak(rwlock_t *lock)
 {
-#ifdef CONFIG_PREEMPTION
 	return rwlock_is_contended(lock);
-#else
-	return 0;
-#endif
 }
 
 static __always_inline bool need_resched(void)
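
For context, a minimal sketch (not part of this patch) of the kind of caller
spin_needbreak() exists for: a loop that holds a spinlock for a potentially
long time and voluntarily drops it when another task is contending on it.
The helpers queue_not_empty() and process_one_item() are illustrative only.

	#include <linux/sched.h>
	#include <linux/spinlock.h>

	/* Illustrative helpers, assumed to be defined elsewhere. */
	extern bool queue_not_empty(void);
	extern void process_one_item(void);

	static void drain_queue(spinlock_t *lock)
	{
		spin_lock(lock);
		while (queue_not_empty()) {
			process_one_item();

			/*
			 * With this patch, spin_needbreak() reports
			 * contention under every preemption model rather
			 * than only when CONFIG_PREEMPTION is set, so
			 * long-running lock holders like this loop yield
			 * to waiters more consistently.
			 */
			if (spin_needbreak(lock) || need_resched()) {
				spin_unlock(lock);
				cond_resched();
				spin_lock(lock);
			}
		}
		spin_unlock(lock);
	}

Callers that prefer a single helper can use the kernel's cond_resched_lock(),
which wraps a similar drop/reschedule/reacquire sequence around the same
spin_needbreak() check.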