The VFP code enables and disables preemption but doesn't call into the
tracer, except in the VFP bounce code to say that preemption has been
enabled again. Trace the preemption disable and enable calls made in
assembly so that we can accurately measure how long preemption is
disabled while handling VFP exceptions.

Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
---
 arch/arm/kernel/entry-header.S | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -165,19 +165,33 @@
 
 	.macro	preempt_enable_no_resched, tsk, cnt
 #ifdef CONFIG_PREEMPT_COUNT
+#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
+	stmdb	sp!, {r0-r3, ip, lr}	@ save clobbered regs across call
+	mov	r0, #1			@ decrement count by one
+	bl	sub_preempt_count	@ C helper notifies the tracer
+	ldmia	sp!, {r0-r3, ip, lr}
+#else
 	get_thread_info \tsk
 	ldr	\cnt, [\tsk, #TI_PREEMPT]	@ get preempt count
 	sub	\cnt, \cnt, #1			@ decrement it
 	str	\cnt, [\tsk, #TI_PREEMPT]
 #endif
+#endif
 	.endm
 
 	.macro	preempt_disable, tsk, cnt
 #ifdef CONFIG_PREEMPT_COUNT
+#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
+	stmdb	sp!, {r0-r3, ip, lr}	@ save clobbered regs across call
+	mov	r0, #1			@ increment count by one
+	bl	add_preempt_count	@ C helper notifies the tracer
+	ldmia	sp!, {r0-r3, ip, lr}
+#else
 	ldr	\cnt, [\tsk, #TI_PREEMPT]	@ get preempt count
 	add	\cnt, \cnt, #1			@ increment it
 	str	\cnt, [\tsk, #TI_PREEMPT]
 #endif
+#endif
 	.endm
 
 /*
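
Note: the reason the traced configurations branch out to C instead of
keeping the three-instruction load/modify/store is that only the C
helpers invoke the preemption tracer. In simplified form (the
CONFIG_DEBUG_PREEMPT sanity checks and the get_parent_ip() indirection
elided, so read this as a sketch of that era's kernel/sched/core.c
rather than the exact source), the helpers do:

void add_preempt_count(int val)
{
	preempt_count() += val;
	/*
	 * Only the outermost disable is traced: the count just went
	 * from zero to val, so preemption was enabled until now.
	 */
	if (preempt_count() == val)
		trace_preempt_off(CALLER_ADDR0, CALLER_ADDR1);
}

void sub_preempt_count(int val)
{
	/*
	 * About to drop back to zero: preemption becomes possible
	 * again, so record how long it was disabled.
	 */
	if (preempt_count() == val)
		trace_preempt_on(CALLER_ADDR0, CALLER_ADDR1);
	preempt_count() -= val;
}

Since both helpers are ordinary C functions, the assembly has to
preserve r0-r3, ip and lr across the call, which is what the
stmdb/ldmia pair in each macro does.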