
[1/5] context_tracking: generalize context tracking APIs to support user and guest

Message ID: 1423600074-2907-2-git-send-email-riel@redhat.com (mailing list archive)
State: New, archived

Commit Message

Rik van Riel Feb. 10, 2015, 8:27 p.m. UTC
From: Rik van Riel <riel@redhat.com>

Split out the mechanism from context_tracking_user_enter and
context_tracking_user_exit into context_tracking_enter and
context_tracking_exit. Leave the old functions in order to avoid
breaking ARM, which calls these functions from assembler code,
and cannot easily use C enum parameters.

Add the expected ctx_state as a parameter to context_tracking_enter and
context_tracking_exit, allowing the same functions to not just track
kernel <> user space switching, but also kernel <> guest transitions.

Signed-off-by: Rik van Riel <riel@redhat.com>
---
 include/linux/context_tracking.h |  8 +++++---
 kernel/context_tracking.c        | 43 ++++++++++++++++++++++++++--------------
 2 files changed, 33 insertions(+), 18 deletions(-)
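
Later patches in this series are expected to build on the parameterized API to mark guest entry and exit. As a rough sketch only -- the IN_GUEST state and the wrapper names below are assumptions for illustration, not something this patch adds -- the intended call pattern would look like:

#include <linux/context_tracking.h>

/*
 * Sketch of a guest-side caller of the generalized API.  IN_GUEST is
 * assumed to be introduced by a later patch in the series; the wrapper
 * names are illustrative only.  The context tracking functions disable
 * interrupts internally, as the diff below shows.
 */
static inline void sketch_guest_enter(void)
{
	/* Kernel is done with RCU read-side work; account time to the guest. */
	context_tracking_enter(IN_GUEST);
}

static inline void sketch_guest_exit(void)
{
	/* Back in the kernel: let RCU and vtime know we left guest context. */
	context_tracking_exit(IN_GUEST);
}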

Comments

Paul E. McKenney Feb. 10, 2015, 9:28 p.m. UTC | #1
On Tue, Feb 10, 2015 at 03:27:50PM -0500, riel@redhat.com wrote:
> From: Rik van Riel <riel@redhat.com>
> 
> Split out the mechanism from context_tracking_user_enter and
> context_tracking_user_exit into context_tracking_enter and
> context_tracking_exit. Leave the old functions in order to avoid
> breaking ARM, which calls these functions from assembler code,
> and cannot easily use C enum parameters.
> 
> Add the expected ctx_state as a parameter to context_tracking_enter and
> context_tracking_exit, allowing the same functions to not just track
> kernel <> user space switching, but also kernel <> guest transitions.
> 
> Signed-off-by: Rik van Riel <riel@redhat.com>

Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

> ---
>  include/linux/context_tracking.h |  8 +++++---
>  kernel/context_tracking.c        | 43 ++++++++++++++++++++++++++--------------
>  2 files changed, 33 insertions(+), 18 deletions(-)
> 
> diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
> index 37b81bd51ec0..954253283709 100644
> --- a/include/linux/context_tracking.h
> +++ b/include/linux/context_tracking.h
> @@ -10,6 +10,8 @@
>  #ifdef CONFIG_CONTEXT_TRACKING
>  extern void context_tracking_cpu_set(int cpu);
> 
> +extern void context_tracking_enter(enum ctx_state state);
> +extern void context_tracking_exit(enum ctx_state state);
>  extern void context_tracking_user_enter(void);
>  extern void context_tracking_user_exit(void);
>  extern void __context_tracking_task_switch(struct task_struct *prev,
> @@ -35,7 +37,7 @@ static inline enum ctx_state exception_enter(void)
>  		return 0;
> 
>  	prev_ctx = this_cpu_read(context_tracking.state);
> -	context_tracking_user_exit();
> +	context_tracking_exit(prev_ctx);
> 
>  	return prev_ctx;
>  }
> @@ -43,8 +45,8 @@ static inline enum ctx_state exception_enter(void)
>  static inline void exception_exit(enum ctx_state prev_ctx)
>  {
>  	if (context_tracking_is_enabled()) {
> -		if (prev_ctx == IN_USER)
> -			context_tracking_user_enter();
> +		if (prev_ctx != IN_KERNEL)
> +			context_tracking_enter(prev_ctx);
>  	}
>  }
> 
> diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
> index 937ecdfdf258..38e38aeac8b9 100644
> --- a/kernel/context_tracking.c
> +++ b/kernel/context_tracking.c
> @@ -39,15 +39,15 @@ void context_tracking_cpu_set(int cpu)
>  }
> 
>  /**
> - * context_tracking_user_enter - Inform the context tracking that the CPU is going to
> - *                               enter userspace mode.
> + * context_tracking_enter - Inform the context tracking that the CPU is going
> + *                          enter user or guest space mode.
>   *
>   * This function must be called right before we switch from the kernel
> - * to userspace, when it's guaranteed the remaining kernel instructions
> - * to execute won't use any RCU read side critical section because this
> - * function sets RCU in extended quiescent state.
> + * to user or guest space, when it's guaranteed the remaining kernel
> + * instructions to execute won't use any RCU read side critical section
> + * because this function sets RCU in extended quiescent state.
>   */
> -void context_tracking_user_enter(void)
> +void context_tracking_enter(enum ctx_state state)
>  {
>  	unsigned long flags;
> 
> @@ -75,7 +75,7 @@ void context_tracking_user_enter(void)
>  	WARN_ON_ONCE(!current->mm);
> 
>  	local_irq_save(flags);
> -	if ( __this_cpu_read(context_tracking.state) != IN_USER) {
> +	if ( __this_cpu_read(context_tracking.state) != state) {
>  		if (__this_cpu_read(context_tracking.active)) {
>  			trace_user_enter(0);
>  			/*
> @@ -101,24 +101,31 @@ void context_tracking_user_enter(void)
>  		 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
>  		 * is false because we know that CPU is not tickless.
>  		 */
> -		__this_cpu_write(context_tracking.state, IN_USER);
> +		__this_cpu_write(context_tracking.state, state);
>  	}
>  	local_irq_restore(flags);
>  }
> +NOKPROBE_SYMBOL(context_tracking_enter);
> +
> +void context_tracking_user_enter(void)
> +{
> +	context_tracking_enter(IN_USER);
> +}
>  NOKPROBE_SYMBOL(context_tracking_user_enter);
> 
>  /**
> - * context_tracking_user_exit - Inform the context tracking that the CPU is
> - *                              exiting userspace mode and entering the kernel.
> + * context_tracking_exit - Inform the context tracking that the CPU is
> + *                         exiting user or guest mode and entering the kernel.
>   *
> - * This function must be called after we entered the kernel from userspace
> - * before any use of RCU read side critical section. This potentially include
> - * any high level kernel code like syscalls, exceptions, signal handling, etc...
> + * This function must be called after we entered the kernel from user or
> + * guest space before any use of RCU read side critical section. This
> + * potentially include any high level kernel code like syscalls, exceptions,
> + * signal handling, etc...
>   *
>   * This call supports re-entrancy. This way it can be called from any exception
>   * handler without needing to know if we came from userspace or not.
>   */
> -void context_tracking_user_exit(void)
> +void context_tracking_exit(enum ctx_state state)
>  {
>  	unsigned long flags;
> 
> @@ -129,7 +136,7 @@ void context_tracking_user_exit(void)
>  		return;
> 
>  	local_irq_save(flags);
> -	if (__this_cpu_read(context_tracking.state) == IN_USER) {
> +	if (__this_cpu_read(context_tracking.state) == state) {
>  		if (__this_cpu_read(context_tracking.active)) {
>  			/*
>  			 * We are going to run code that may use RCU. Inform
> @@ -143,6 +150,12 @@ void context_tracking_user_exit(void)
>  	}
>  	local_irq_restore(flags);
>  }
> +NOKPROBE_SYMBOL(context_tracking_exit);
> +
> +void context_tracking_user_exit(void)
> +{
> +	context_tracking_exit(IN_USER);
> +}
>  NOKPROBE_SYMBOL(context_tracking_user_exit);
> 
>  /**
> -- 
> 1.9.3
> 


Patch

diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 37b81bd51ec0..954253283709 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -10,6 +10,8 @@ 
 #ifdef CONFIG_CONTEXT_TRACKING
 extern void context_tracking_cpu_set(int cpu);
 
+extern void context_tracking_enter(enum ctx_state state);
+extern void context_tracking_exit(enum ctx_state state);
 extern void context_tracking_user_enter(void);
 extern void context_tracking_user_exit(void);
 extern void __context_tracking_task_switch(struct task_struct *prev,
@@ -35,7 +37,7 @@  static inline enum ctx_state exception_enter(void)
 		return 0;
 
 	prev_ctx = this_cpu_read(context_tracking.state);
-	context_tracking_user_exit();
+	context_tracking_exit(prev_ctx);
 
 	return prev_ctx;
 }
@@ -43,8 +45,8 @@  static inline enum ctx_state exception_enter(void)
 static inline void exception_exit(enum ctx_state prev_ctx)
 {
 	if (context_tracking_is_enabled()) {
-		if (prev_ctx == IN_USER)
-			context_tracking_user_enter();
+		if (prev_ctx != IN_KERNEL)
+			context_tracking_enter(prev_ctx);
 	}
 }
 
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 937ecdfdf258..38e38aeac8b9 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -39,15 +39,15 @@  void context_tracking_cpu_set(int cpu)
 }
 
 /**
- * context_tracking_user_enter - Inform the context tracking that the CPU is going to
- *                               enter userspace mode.
+ * context_tracking_enter - Inform the context tracking that the CPU is going
+ *                          enter user or guest space mode.
  *
  * This function must be called right before we switch from the kernel
- * to userspace, when it's guaranteed the remaining kernel instructions
- * to execute won't use any RCU read side critical section because this
- * function sets RCU in extended quiescent state.
+ * to user or guest space, when it's guaranteed the remaining kernel
+ * instructions to execute won't use any RCU read side critical section
+ * because this function sets RCU in extended quiescent state.
  */
-void context_tracking_user_enter(void)
+void context_tracking_enter(enum ctx_state state)
 {
 	unsigned long flags;
 
@@ -75,7 +75,7 @@  void context_tracking_user_enter(void)
 	WARN_ON_ONCE(!current->mm);
 
 	local_irq_save(flags);
-	if ( __this_cpu_read(context_tracking.state) != IN_USER) {
+	if ( __this_cpu_read(context_tracking.state) != state) {
 		if (__this_cpu_read(context_tracking.active)) {
 			trace_user_enter(0);
 			/*
@@ -101,24 +101,31 @@  void context_tracking_user_enter(void)
 		 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
 		 * is false because we know that CPU is not tickless.
 		 */
-		__this_cpu_write(context_tracking.state, IN_USER);
+		__this_cpu_write(context_tracking.state, state);
 	}
 	local_irq_restore(flags);
 }
+NOKPROBE_SYMBOL(context_tracking_enter);
+
+void context_tracking_user_enter(void)
+{
+	context_tracking_enter(IN_USER);
+}
 NOKPROBE_SYMBOL(context_tracking_user_enter);
 
 /**
- * context_tracking_user_exit - Inform the context tracking that the CPU is
- *                              exiting userspace mode and entering the kernel.
+ * context_tracking_exit - Inform the context tracking that the CPU is
+ *                         exiting user or guest mode and entering the kernel.
  *
- * This function must be called after we entered the kernel from userspace
- * before any use of RCU read side critical section. This potentially include
- * any high level kernel code like syscalls, exceptions, signal handling, etc...
+ * This function must be called after we entered the kernel from user or
+ * guest space before any use of RCU read side critical section. This
+ * potentially include any high level kernel code like syscalls, exceptions,
+ * signal handling, etc...
  *
  * This call supports re-entrancy. This way it can be called from any exception
  * handler without needing to know if we came from userspace or not.
  */
-void context_tracking_user_exit(void)
+void context_tracking_exit(enum ctx_state state)
 {
 	unsigned long flags;
 
@@ -129,7 +136,7 @@  void context_tracking_user_exit(void)
 		return;
 
 	local_irq_save(flags);
-	if (__this_cpu_read(context_tracking.state) == IN_USER) {
+	if (__this_cpu_read(context_tracking.state) == state) {
 		if (__this_cpu_read(context_tracking.active)) {
 			/*
 			 * We are going to run code that may use RCU. Inform
@@ -143,6 +150,12 @@  void context_tracking_user_exit(void)
 	}
 	local_irq_restore(flags);
 }
+NOKPROBE_SYMBOL(context_tracking_exit);
+
+void context_tracking_user_exit(void)
+{
+	context_tracking_exit(IN_USER);
+}
 NOKPROBE_SYMBOL(context_tracking_user_exit);
 
 /**
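
The exception_enter()/exception_exit() hunk above is the immediate consumer of the new parameter: the interrupted context is captured on entry and replayed on exit, so an exception taken in guest context can be restored to that same context instead of being forced back to IN_USER. A minimal sketch of that pattern, using a hypothetical handler name:

#include <linux/context_tracking.h>

/*
 * Sketch of the save/replay pattern that exception_enter()/exception_exit()
 * implement on top of the parameterized API.  do_hypothetical_exception()
 * is an illustrative name, not an actual kernel handler.
 */
void do_hypothetical_exception(void)
{
	enum ctx_state prev_ctx;

	/* Leaves whatever non-kernel context was interrupted (user or guest). */
	prev_ctx = exception_enter();

	/* Kernel work; RCU read-side critical sections are safe here. */

	/* Re-enters prev_ctx unless it was IN_KERNEL. */
	exception_exit(prev_ctx);
}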