@@ -10,8 +10,8 @@
#ifdef CONFIG_CONTEXT_TRACKING
extern void context_tracking_cpu_set(int cpu);
-extern void context_tracking_enter(void);
-extern void context_tracking_exit(void);
+extern void context_tracking_enter(enum ctx_state state);
+extern void context_tracking_exit(enum ctx_state state);
extern void context_tracking_user_enter(void);
extern void context_tracking_user_exit(void);
extern void __context_tracking_task_switch(struct task_struct *prev,
@@ -37,7 +37,7 @@ static inline enum ctx_state exception_enter(void)
return 0;
prev_ctx = this_cpu_read(context_tracking.state);
- context_tracking_user_exit();
+ context_tracking_exit(prev_ctx);
return prev_ctx;
}
@@ -45,8 +45,8 @@ static inline enum ctx_state exception_enter(void)
static inline void exception_exit(enum ctx_state prev_ctx)
{
if (context_tracking_is_enabled()) {
- if (prev_ctx == IN_USER)
- context_tracking_user_enter();
+ if (prev_ctx != IN_KERNEL)
+ context_tracking_enter(prev_ctx);
}
}
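
For orientation, a minimal sketch (not part of the patch) of how an architecture
exception handler is expected to bracket its work with these helpers; the handler
name and body are hypothetical:

    /* Hypothetical handler, for illustration only. */
    static void example_exception_handler(struct pt_regs *regs)
    {
        enum ctx_state prev_state;

        prev_state = exception_enter();   /* save the interrupted context, switch tracking to the kernel */
        /* ... handle the exception; RCU read-side sections are safe here ... */
        exception_exit(prev_state);       /* restore the saved context on the way out */
    }
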
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -47,7 +47,7 @@ void context_tracking_cpu_set(int cpu)
* instructions to execute won't use any RCU read side critical section
* because this function sets RCU in extended quiescent state.
*/
-void context_tracking_enter(void)
+void context_tracking_enter(enum ctx_state state)
{
unsigned long flags;
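
The new argument is the value kept in the per-CPU tracking state. For reference,
a rough sketch of how that state was laid out around this series (see
include/linux/context_tracking_state.h); the exact definition may differ, and only
IN_KERNEL and IN_USER are relied on in the hunks here:

    /* Approximate sketch of the per-CPU state the 'state' argument selects. */
    struct context_tracking {
        bool active;            /* is this CPU actually being tracked? */
        enum ctx_state {
            IN_KERNEL = 0,      /* running kernel code (the default) */
            IN_USER,            /* running in userspace */
        } state;
    };
    DECLARE_PER_CPU(struct context_tracking, context_tracking);

Pushing the state down as a parameter lets the same function record transitions to
contexts other than userspace without duplicating the RCU and vtime plumbing.
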
@@ -75,7 +75,7 @@ void context_tracking_enter(void)
WARN_ON_ONCE(!current->mm);
local_irq_save(flags);
- if ( __this_cpu_read(context_tracking.state) != IN_USER) {
+ if ( __this_cpu_read(context_tracking.state) != state) {
if (__this_cpu_read(context_tracking.active)) {
trace_user_enter(0);
/*
@@ -101,7 +101,7 @@ void context_tracking_enter(void)
* OTOH we can spare the calls to vtime and RCU when context_tracking.active
* is false because we know that CPU is not tickless.
*/
- __this_cpu_write(context_tracking.state, IN_USER);
+ __this_cpu_write(context_tracking.state, state);
}
local_irq_restore(flags);
}
@@ -109,7 +109,7 @@ NOKPROBE_SYMBOL(context_tracking_enter);
void context_tracking_user_enter(void)
{
- context_tracking_enter();
+ context_tracking_enter(IN_USER);
}
NOKPROBE_SYMBOL(context_tracking_user_enter);
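
The old user-space entry point keeps its behavior and becomes a thin wrapper, so
after this hunk the two calls below are equivalent (illustration, not new code in
the patch):

    context_tracking_user_enter();      /* unchanged external API */
    context_tracking_enter(IN_USER);    /* what the wrapper now forwards to */
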
@@ -125,7 +125,7 @@ NOKPROBE_SYMBOL(context_tracking_user_enter);
* This call supports re-entrancy. This way it can be called from any exception
* handler without needing to know if we came from userspace or not.
*/
-void context_tracking_exit(void)
+void context_tracking_exit(enum ctx_state state)
{
unsigned long flags;
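
The re-entrancy described in the comment above comes from the state comparison in
the next hunk: a call whose state does not match the CPU's current tracking state
is a no-op. A small illustration (not code from the patch), assuming the CPU is
currently tracked as IN_USER and tracking is active:

    context_tracking_exit(IN_USER);  /* matches: run the exit hooks, state goes back to IN_KERNEL */
    context_tracking_exit(IN_USER);  /* state is now IN_KERNEL, no match: nothing to do */
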
@@ -136,7 +136,7 @@ void context_tracking_exit(void)
return;
local_irq_save(flags);
- if (__this_cpu_read(context_tracking.state) == IN_USER) {
+ if (__this_cpu_read(context_tracking.state) == state) {
if (__this_cpu_read(context_tracking.active)) {
/*
* We are going to run code that may use RCU. Inform
@@ -154,7 +154,7 @@ NOKPROBE_SYMBOL(context_tracking_exit);
void context_tracking_user_exit(void)
{
- context_tracking_exit();
+ context_tracking_exit(IN_USER);
}
NOKPROBE_SYMBOL(context_tracking_user_exit);
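
Both old entry points now simply pass IN_USER down to the generalized helpers. As
a closing illustration, a hypothetical sketch (not part of this patch; the function
name and its caller are assumptions) of what the explicit state parameter makes
possible for code tracking some other non-kernel context:

    /*
     * Hypothetical: bracket a stretch of non-kernel execution with the
     * generalized helpers. While entered, RCU is in an extended quiescent
     * state on tracked CPUs, so the code in between must not use RCU
     * read-side critical sections.
     */
    static void example_run_tracked(enum ctx_state state)
    {
        context_tracking_enter(state);  /* record leaving the kernel */
        /* ... run in the tracked context ... */
        context_tracking_exit(state);   /* back to the kernel, RCU is watching again */
    }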