--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -2,9 +2,7 @@
#define LINUX_HARDIRQ_H

#include <linux/preempt.h>
-#ifdef CONFIG_PREEMPT
#include <linux/smp_lock.h>
-#endif
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <asm/hardirq.h>
@@ -92,13 +90,8 @@
*/
#define in_nmi() (preempt_count() & NMI_MASK)

-#if defined(CONFIG_PREEMPT)
-# define PREEMPT_INATOMIC_BASE kernel_locked()
-# define PREEMPT_CHECK_OFFSET 1
-#else
-# define PREEMPT_INATOMIC_BASE 0
-# define PREEMPT_CHECK_OFFSET 0
-#endif
+#define PREEMPT_CHECK_OFFSET 1
+#define PREEMPT_INATOMIC_BASE kernel_locked()

/*
* Are we running in atomic context? WARNING: this macro cannot
@@ -116,12 +109,11 @@
#define in_atomic_preempt_off() \
((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
+#define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)

#ifdef CONFIG_PREEMPT
# define preemptible() (preempt_count() == 0 && !irqs_disabled())
-# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define preemptible() 0
-# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
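With the conditionals gone, every kernel maintains a live preempt_count, so in_atomic() reports the same thing whether or not CONFIG_PREEMPT is set. The base stays kernel_locked() because the BKL's contribution to preempt_count must be discounted: sleeping while holding the BKL is legal, since the lock is dropped across schedule(). A sketch of the resulting arithmetic (not part of the patch; some_lock is a stand-in):

    lock_kernel();            /* preempt_count: 0 -> 1, lock_depth: -1 -> 0 */
    WARN_ON(in_atomic());     /* 1 == kernel_locked(), not atomic           */
    spin_lock(&some_lock);    /* preempt_count: 1 -> 2                      */
    WARN_ON(!in_atomic());    /* 2 != 1, genuinely atomic                   */
    spin_unlock(&some_lock);
    unlock_kernel();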
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -82,14 +82,24 @@ do { \
#else

-#define preempt_disable() do { } while (0)
-#define preempt_enable_no_resched() do { } while (0)
-#define preempt_enable() do { } while (0)
+#define preempt_disable() \
+do { \
+ inc_preempt_count(); \
+ barrier(); \
+} while (0)
+
+#define preempt_enable() \
+do { \
+ barrier(); \
+ dec_preempt_count(); \
+} while (0)
+
+#define preempt_enable_no_resched() preempt_enable()
#define preempt_check_resched() do { } while (0)

-#define preempt_disable_notrace() do { } while (0)
-#define preempt_enable_no_resched_notrace() do { } while (0)
-#define preempt_enable_notrace() do { } while (0)
+#define preempt_disable_notrace() preempt_disable()
+#define preempt_enable_no_resched_notrace() preempt_enable()
+#define preempt_enable_notrace() preempt_enable()

#endif

#ifdef CONFIG_PREEMPT_NOTIFIERS
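The !CONFIG_PREEMPT variants now do real bookkeeping instead of compiling to nothing; the only remaining difference from the CONFIG_PREEMPT versions is that enabling never triggers a reschedule. The barrier() placement (after the increment, before the decrement) keeps the compiler from moving critical-section accesses outside the protected region. A minimal sketch of the nesting behaviour (the WARN_ONs are illustrative, not from the patch):

    preempt_disable();       /* preempt_count: 0 -> 1                   */
    preempt_disable();       /* nests: 1 -> 2                           */
    WARN_ON(!in_atomic());   /* the non-zero count is now visible       */
    preempt_enable();        /* 2 -> 1, no resched check in this branch */
    preempt_enable();        /* 1 -> 0                                  */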
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2379,11 +2379,7 @@ extern int _cond_resched(void);

extern int __cond_resched_lock(spinlock_t *lock);

-#ifdef CONFIG_PREEMPT
#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
-#else
-#define PREEMPT_LOCK_OFFSET 0
-#endif

#define cond_resched_lock(lock) ({ \
__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
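PREEMPT_LOCK_OFFSET tells __might_sleep() that the caller legitimately holds exactly one spinlock; since spinlocks now raise preempt_count on every kernel, the offset must be PREEMPT_OFFSET unconditionally. A hypothetical caller (q, more_work() and do_one_item() are stand-ins):

    spin_lock(&q->lock);                 /* preempt_count += PREEMPT_OFFSET */
    while (more_work(q)) {
            do_one_item(q);
            /* may drop the lock, schedule, and retake it */
            cond_resched_lock(&q->lock);
    }
    spin_unlock(&q->lock);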
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2590,10 +2590,8 @@ void sched_fork(struct task_struct *p, int clone_flags)
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
p->oncpu = 0;
#endif
-#ifdef CONFIG_PREEMPT
/* Want to start with kernel preemption disabled. */
task_thread_info(p)->preempt_count = 1;
-#endif
plist_node_init(&p->pushable_tasks, MAX_PRIO);

put_cpu();
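With preempt_count accounting unconditional, every new task must start with preemption disabled, not just those on CONFIG_PREEMPT kernels; the matching decrement happens when the child completes its first context switch.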
@@ -6973,11 +6971,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
spin_unlock_irqrestore(&rq->lock, flags);

/* Set the preempt count _outside_ the spinlocks! */
-#if defined(CONFIG_PREEMPT)
task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
-#else
- task_thread_info(idle)->preempt_count = 0;
-#endif
/*
* The idle tasks have their own, simple scheduling class:
*/
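The idle task's initial count likewise becomes unconditional: it starts at 1 exactly when the task holds the BKL (lock_depth >= 0), keeping the count consistent with the preempt_disable() that __lock_kernel() now performs (see the lib/kernel_lock.c hunk below).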
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -93,6 +93,7 @@ static inline void __lock_kernel(void)
*/
static inline void __lock_kernel(void)
{
+ preempt_disable();
_raw_spin_lock(&kernel_flag);
}
#endif
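The non-preempt BKL fast path must now charge the lock to preempt_count like any other spinlock; the kernel_locked() base used by in_atomic() above relies on exactly this. The matching release is not part of this excerpt; presumably __unlock_kernel() gains the counterpart, along these lines:

    static inline void __unlock_kernel(void)
    {
            _raw_spin_unlock(&kernel_flag);
            preempt_enable();    /* assumed counterpart, not shown above */
    }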