--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -92,12 +92,11 @@
  */
 #define in_nmi()	(preempt_count() & NMI_MASK)
 
+#define PREEMPT_CHECK_OFFSET 1
 #if defined(CONFIG_PREEMPT)
 # define PREEMPT_INATOMIC_BASE kernel_locked()
-# define PREEMPT_CHECK_OFFSET 1
 #else
 # define PREEMPT_INATOMIC_BASE 0
-# define PREEMPT_CHECK_OFFSET 0
 #endif
 
 /*
@@ -116,12 +115,11 @@
 #define in_atomic_preempt_off() \
 		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
 
+#define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
 #ifdef CONFIG_PREEMPT
 # define preemptible()	(preempt_count() == 0 && !irqs_disabled())
-# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
 #else
 # define preemptible()	0
-# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
 #endif
 
 #if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
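
With PREEMPT_CHECK_OFFSET and IRQ_EXIT_OFFSET defined unconditionally, interrupt exit is accounted the same way on every configuration: irq_exit() subtracts HARDIRQ_OFFSET - 1, deliberately leaving one count so the softirq phase stays atomic, and the closing preempt_enable_no_resched() drops the remainder. A standalone model of that arithmetic, assuming the HARDIRQ_OFFSET value and the irq_enter()/irq_exit() shape of this kernel era:

#include <assert.h>

#define HARDIRQ_OFFSET	(1UL << 16)		/* hardirq.h value, this era */
#define IRQ_EXIT_OFFSET	(HARDIRQ_OFFSET - 1)

static unsigned long preempt_count;		/* per-task in the kernel */

int main(void)
{
	preempt_count += HARDIRQ_OFFSET;	/* irq_enter() */
	preempt_count -= IRQ_EXIT_OFFSET;	/* irq_exit() body */
	assert(preempt_count == 1);		/* softirq phase still atomic */
	preempt_count -= 1;			/* preempt_enable_no_resched() */
	assert(preempt_count == 0);		/* balanced, no #ifdef needed */
	return 0;
}
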
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -82,14 +82,24 @@ do { \
 } while (0)
 
 #else
 
-#define preempt_disable()		do { } while (0)
-#define preempt_enable_no_resched()	do { } while (0)
-#define preempt_enable()		do { } while (0)
+#define preempt_disable() \
+do { \
+	inc_preempt_count(); \
+	barrier(); \
+} while (0)
+
+#define preempt_enable() \
+do { \
+	barrier(); \
+	dec_preempt_count(); \
+} while (0)
+
+#define preempt_enable_no_resched()	preempt_enable()
 #define preempt_check_resched()		do { } while (0)
 
-#define preempt_disable_notrace()		do { } while (0)
-#define preempt_enable_no_resched_notrace()	do { } while (0)
-#define preempt_enable_notrace()		do { } while (0)
+#define preempt_disable_notrace()		preempt_disable()
+#define preempt_enable_no_resched_notrace()	preempt_enable()
+#define preempt_enable_notrace()		preempt_enable()
 
 #endif
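
The !CONFIG_PREEMPT variants are no longer empty: the count is maintained exactly as on preemptible kernels, and only the rescheduling step stays compiled out (preempt_check_resched() remains a no-op). A minimal userspace sketch of the resulting semantics, with names that mirror the kernel macros but none of the kernel's barriers or per-task storage:

#include <assert.h>

static int preempt_count;	/* thread_info field in the real kernel */

static void preempt_disable(void) { preempt_count++; }
static void preempt_enable(void)  { preempt_count--; }	/* no resched check */
static int in_atomic(void)        { return preempt_count != 0; }

int main(void)
{
	preempt_disable();
	preempt_disable();		/* nesting is counted */
	assert(in_atomic());		/* meaningful on !CONFIG_PREEMPT too */
	preempt_enable();
	preempt_enable();
	assert(!in_atomic());
	return 0;
}

Once the count is always kept, atomicity checks such as in_atomic() give correct answers on non-preemptible kernels as well, which is what the hardirq.h changes above assume.
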
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2561,10 +2561,8 @@ void sched_fork(struct task_struct *p, int clone_flags)
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	p->oncpu = 0;
 #endif
-#ifdef CONFIG_PREEMPT
 	/* Want to start with kernel preemption disabled. */
 	task_thread_info(p)->preempt_count = 1;
-#endif
 	plist_node_init(&p->pushable_tasks, MAX_PRIO);
 
 	put_cpu();
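
Every forked task now starts with preempt_count set to 1 regardless of configuration; the count is dropped once the child first runs and the scheduler releases the runqueue lock on the schedule_tail()/finish_task_switch() path.
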
@@ -6944,11 +6942,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	spin_unlock_irqrestore(&rq->lock, flags);
 
 	/* Set the preempt count _outside_ the spinlocks! */
-#if defined(CONFIG_PREEMPT)
 	task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
-#else
-	task_thread_info(idle)->preempt_count = 0;
-#endif
 	/*
 	 * The idle tasks have their own, simple scheduling class:
 	 */
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -93,6 +93,7 @@ static inline void __lock_kernel(void)
  */
 static inline void __lock_kernel(void)
 {
+	preempt_disable();
 	_raw_spin_lock(&kernel_flag);
 }
 #endif
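
The added preempt_disable() is needed because _raw_spin_lock() bypasses the preempt-aware spinlock wrappers, while the shared __unlock_kernel() already ends in preempt_enable(): under the old no-op macros that call was free on !CONFIG_PREEMPT, but with counting always enabled the lock side must increment what the unlock side now really decrements. A sketch of that counterpart, assuming the shape lib/kernel_lock.c has in this kernel era:

static inline void __unlock_kernel(void)
{
	_raw_spin_unlock(&kernel_flag);
	preempt_enable();	/* now really drops the count taken above */
}
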