[v10,3/5] arm64: move preemption disablement to prctl handlers

Message ID 20210713234801.3858018-4-pcc@google.com (mailing list archive)
State New, archived
Series arm64: mte: allow async MTE to be upgraded to sync on a per-CPU basis

Commit Message

Peter Collingbourne July 13, 2021, 11:47 p.m. UTC
In the next patch, we will start reading sctlr_user from
mte_update_sctlr_user and subsequently writing a new value based on the
task's TCF setting and potentially the per-CPU TCF preference. This
means that we need to be careful to disable preemption around any
code sequences that read from sctlr_user and subsequently write to
sctlr_user and/or SCTLR_EL1, so that we don't end up writing a stale
value (based on the previous CPU's TCF preference) to either of them.

We currently have four such sequences, in the prctl handlers for
PR_SET_TAGGED_ADDR_CTRL and PR_PAC_SET_ENABLED_KEYS, as well as in
the task initialization code that resets the prctl settings. Change
the prctl handlers to disable preemption in the handlers themselves
rather than the functions that they call, and change the task
initialization code to call the respective prctl handlers instead of
setting sctlr_user directly.

As a result of this change, we no longer need the helper function
set_task_sctlr_el1, nor does its behavior make sense any more, so
remove it.

Signed-off-by: Peter Collingbourne <pcc@google.com>
Link: https://linux-review.googlesource.com/id/Ic0e8a0c00bb47d786c1e8011df0b7fe99bee4bb5
---
 arch/arm64/include/asm/pointer_auth.h | 12 ++++++------
 arch/arm64/include/asm/processor.h    |  2 +-
 arch/arm64/kernel/mte.c               |  8 ++++----
 arch/arm64/kernel/pointer_auth.c      | 10 ++++++----
 arch/arm64/kernel/process.c           | 21 +++++++--------------
 5 files changed, 24 insertions(+), 29 deletions(-)
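
To make the window concrete, here is a minimal sketch of the unprotected sequence the commit message describes (illustrative only: recompute_for_this_cpu() is a hypothetical stand-in for the per-CPU TCF preference logic arriving in the next patch, not a function in this series):

static void racy_update(struct task_struct *task)
{
	u64 sctlr = task->thread.sctlr_user;	/* read on CPU 0 */

	/* hypothetical: folds in CPU 0's TCF preference */
	sctlr = recompute_for_this_cpu(sctlr);

	/*
	 * Preemption here can migrate the task to CPU 1, so the values
	 * written below are stale with respect to the new CPU's
	 * preference.
	 */
	task->thread.sctlr_user = sctlr;
	update_sctlr_el1(sctlr);		/* stale write */
}

Disabling preemption across the entire read-modify-write sequence, as the prctl handlers now do, closes this window.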

Comments

Will Deacon July 14, 2021, 11:43 a.m. UTC | #1
On Tue, Jul 13, 2021 at 04:47:59PM -0700, Peter Collingbourne wrote:
> In the next patch, we will start reading sctlr_user from
> mte_update_sctlr_user and subsequently writing a new value based on the
> task's TCF setting and potentially the per-CPU TCF preference. This
> means that we need to be careful to disable preemption around any
> code sequences that read from sctlr_user and subsequently write to
> sctlr_user and/or SCTLR_EL1, so that we don't end up writing a stale
> value (based on the previous CPU's TCF preference) to either of them.
> 
> We currently have four such sequences, in the prctl handlers for
> PR_SET_TAGGED_ADDR_CTRL and PR_PAC_SET_ENABLED_KEYS, as well as in
> the task initialization code that resets the prctl settings. Change
> the prctl handlers to disable preemption in the handlers themselves
> rather than the functions that they call, and change the task
> initialization code to call the respective prctl handlers instead of
> setting sctlr_user directly.
> 
> As a result of this change, we no longer need the helper function
> set_task_sctlr_el1, nor does its behavior make sense any more, so
> remove it.
> 
> Signed-off-by: Peter Collingbourne <pcc@google.com>
> Link: https://linux-review.googlesource.com/id/Ic0e8a0c00bb47d786c1e8011df0b7fe99bee4bb5
> ---
>  arch/arm64/include/asm/pointer_auth.h | 12 ++++++------
>  arch/arm64/include/asm/processor.h    |  2 +-
>  arch/arm64/kernel/mte.c               |  8 ++++----
>  arch/arm64/kernel/pointer_auth.c      | 10 ++++++----
>  arch/arm64/kernel/process.c           | 21 +++++++--------------
>  5 files changed, 24 insertions(+), 29 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
> index d50416be99be..592968f0bc22 100644
> --- a/arch/arm64/include/asm/pointer_auth.h
> +++ b/arch/arm64/include/asm/pointer_auth.h
> @@ -10,6 +10,9 @@
>  #include <asm/memory.h>
>  #include <asm/sysreg.h>
>  
> +#define PR_PAC_ENABLED_KEYS_MASK                                               \
> +	(PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY)
> +
>  #ifdef CONFIG_ARM64_PTR_AUTH
>  /*
>   * Each key is a 128-bit quantity which is split across a pair of 64-bit
> @@ -113,9 +116,9 @@ static __always_inline void ptrauth_enable(void)
>  									       \
>  		/* enable all keys */                                          \
>  		if (system_supports_address_auth())                            \
> -			set_task_sctlr_el1(current->thread.sctlr_user |        \
> -					   SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |   \
> -					   SCTLR_ELx_ENDA | SCTLR_ELx_ENDB);   \
> +			ptrauth_set_enabled_keys(current,                      \
> +						 PR_PAC_ENABLED_KEYS_MASK,     \
> +						 PR_PAC_ENABLED_KEYS_MASK);    \
>  	} while (0)
>  
>  #define ptrauth_thread_switch_user(tsk)                                        \
> @@ -139,7 +142,4 @@ static __always_inline void ptrauth_enable(void)
>  #define ptrauth_thread_switch_kernel(tsk)
>  #endif /* CONFIG_ARM64_PTR_AUTH */
>  
> -#define PR_PAC_ENABLED_KEYS_MASK                                               \
> -	(PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY)
> -
>  #endif /* __ASM_POINTER_AUTH_H */
> diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
> index 80ceb9cbdd60..ebb3b1aefed7 100644
> --- a/arch/arm64/include/asm/processor.h
> +++ b/arch/arm64/include/asm/processor.h
> @@ -257,7 +257,7 @@ extern void release_thread(struct task_struct *);
>  
>  unsigned long get_wchan(struct task_struct *p);
>  
> -void set_task_sctlr_el1(u64 sctlr);
> +void update_sctlr_el1(u64 sctlr);
>  
>  /* Thread switching */
>  extern struct task_struct *cpu_switch_to(struct task_struct *prev,
> diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
> index 53d89915029d..432d9b641e9c 100644
> --- a/arch/arm64/kernel/mte.c
> +++ b/arch/arm64/kernel/mte.c
> @@ -222,9 +222,7 @@ void mte_thread_init_user(void)
>  	write_sysreg_s(0, SYS_TFSRE0_EL1);
>  	clear_thread_flag(TIF_MTE_ASYNC_FAULT);
>  	/* disable tag checking and reset tag generation mask */
> -	current->thread.mte_ctrl = MTE_CTRL_GCR_USER_EXCL_MASK;
> -	mte_update_sctlr_user(current);
> -	set_task_sctlr_el1(current->thread.sctlr_user);
> +	set_mte_ctrl(current, 0);
>  }
>  
>  void mte_thread_switch(struct task_struct *next)
> @@ -281,8 +279,10 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
>  
>  	task->thread.mte_ctrl = mte_ctrl;
>  	if (task == current) {
> +		preempt_disable();
>  		mte_update_sctlr_user(task);
> -		set_task_sctlr_el1(task->thread.sctlr_user);
> +		update_sctlr_el1(task->thread.sctlr_user);
> +		preempt_enable();
>  	}
>  
>  	return 0;
> diff --git a/arch/arm64/kernel/pointer_auth.c b/arch/arm64/kernel/pointer_auth.c
> index 60901ab0a7fe..2708b620b4ae 100644
> --- a/arch/arm64/kernel/pointer_auth.c
> +++ b/arch/arm64/kernel/pointer_auth.c
> @@ -67,7 +67,7 @@ static u64 arg_to_enxx_mask(unsigned long arg)
>  int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
>  			     unsigned long enabled)
>  {
> -	u64 sctlr = tsk->thread.sctlr_user;
> +	u64 sctlr;
>  
>  	if (!system_supports_address_auth())
>  		return -EINVAL;
> @@ -78,12 +78,14 @@ int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
>  	if ((keys & ~PR_PAC_ENABLED_KEYS_MASK) || (enabled & ~keys))
>  		return -EINVAL;
>  
> +	preempt_disable();
> +	sctlr = tsk->thread.sctlr_user;
>  	sctlr &= ~arg_to_enxx_mask(keys);
>  	sctlr |= arg_to_enxx_mask(enabled);
> +	tsk->thread.sctlr_user = sctlr;
>  	if (tsk == current)
> -		set_task_sctlr_el1(sctlr);
> -	else
> -		tsk->thread.sctlr_user = sctlr;
> +		update_sctlr_el1(sctlr);
> +	preempt_enable();

This should work, as long as tsk is stopped if tsk != current, otherwise I
think there are a bunch of existing races between
ptrauth_set_enabled_keys(), set_mte_ctrl() and ptrauth_get_enabled_keys().
That's (probably?) true for the ptrace case, which is the only scenario
where tsk != current afaict.

I can't help but feel we should add _something_ (as a separate patch) to
avoid tripping over this in future. Maybe a WARN_ON(!task_is_stopped(tsk))
if tsk != current? Failing that, we could handle the concurrency by adding a
helper to update sctlr_user, which takes a clear and a set mask and does a
cmpxchg_relaxed() under the hood? That might also eliminate the need for
some of these preempt_disable() sections.

Anyway, that's a pre-existing issue, so for this patch:

Acked-by: Will Deacon <will@kernel.org>

Will
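
As a rough illustration of the two mitigations suggested above (a hedged sketch: assert_remote_task_stopped() and sctlr_user_modify() are hypothetical names, not part of this series):

/* Catch callers that poke a tracee which is still running. */
static void assert_remote_task_stopped(struct task_struct *tsk)
{
	if (tsk != current)
		WARN_ON(!task_is_stopped(tsk));
}

/*
 * Lock-free clear/set update of sctlr_user, tolerant of concurrent
 * updaters such as a racing ptrace access.
 */
static u64 sctlr_user_modify(struct task_struct *tsk, u64 clear, u64 set)
{
	u64 old, new;

	do {
		old = READ_ONCE(tsk->thread.sctlr_user);
		new = (old & ~clear) | set;
	} while (cmpxchg_relaxed(&tsk->thread.sctlr_user, old, new) != old);

	return new;
}

With a helper like the second one, callers would only need to disable preemption around the final write to SCTLR_EL1 in the tsk == current case.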

Patch

diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index d50416be99be..592968f0bc22 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -10,6 +10,9 @@
 #include <asm/memory.h>
 #include <asm/sysreg.h>
 
+#define PR_PAC_ENABLED_KEYS_MASK                                               \
+	(PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY)
+
 #ifdef CONFIG_ARM64_PTR_AUTH
 /*
  * Each key is a 128-bit quantity which is split across a pair of 64-bit
@@ -113,9 +116,9 @@ static __always_inline void ptrauth_enable(void)
 									       \
 		/* enable all keys */                                          \
 		if (system_supports_address_auth())                            \
-			set_task_sctlr_el1(current->thread.sctlr_user |        \
-					   SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |   \
-					   SCTLR_ELx_ENDA | SCTLR_ELx_ENDB);   \
+			ptrauth_set_enabled_keys(current,                      \
+						 PR_PAC_ENABLED_KEYS_MASK,     \
+						 PR_PAC_ENABLED_KEYS_MASK);    \
 	} while (0)
 
 #define ptrauth_thread_switch_user(tsk)                                        \
@@ -139,7 +142,4 @@ static __always_inline void ptrauth_enable(void)
 #define ptrauth_thread_switch_kernel(tsk)
 #endif /* CONFIG_ARM64_PTR_AUTH */
 
-#define PR_PAC_ENABLED_KEYS_MASK                                               \
-	(PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY)
-
 #endif /* __ASM_POINTER_AUTH_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 80ceb9cbdd60..ebb3b1aefed7 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -257,7 +257,7 @@ extern void release_thread(struct task_struct *);
 
 unsigned long get_wchan(struct task_struct *p);
 
-void set_task_sctlr_el1(u64 sctlr);
+void update_sctlr_el1(u64 sctlr);
 
 /* Thread switching */
 extern struct task_struct *cpu_switch_to(struct task_struct *prev,
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 53d89915029d..432d9b641e9c 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -222,9 +222,7 @@ void mte_thread_init_user(void)
 	write_sysreg_s(0, SYS_TFSRE0_EL1);
 	clear_thread_flag(TIF_MTE_ASYNC_FAULT);
 	/* disable tag checking and reset tag generation mask */
-	current->thread.mte_ctrl = MTE_CTRL_GCR_USER_EXCL_MASK;
-	mte_update_sctlr_user(current);
-	set_task_sctlr_el1(current->thread.sctlr_user);
+	set_mte_ctrl(current, 0);
 }
 
 void mte_thread_switch(struct task_struct *next)
@@ -281,8 +279,10 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 
 	task->thread.mte_ctrl = mte_ctrl;
 	if (task == current) {
+		preempt_disable();
 		mte_update_sctlr_user(task);
-		set_task_sctlr_el1(task->thread.sctlr_user);
+		update_sctlr_el1(task->thread.sctlr_user);
+		preempt_enable();
 	}
 
 	return 0;
diff --git a/arch/arm64/kernel/pointer_auth.c b/arch/arm64/kernel/pointer_auth.c
index 60901ab0a7fe..2708b620b4ae 100644
--- a/arch/arm64/kernel/pointer_auth.c
+++ b/arch/arm64/kernel/pointer_auth.c
@@ -67,7 +67,7 @@ static u64 arg_to_enxx_mask(unsigned long arg)
 int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
 			     unsigned long enabled)
 {
-	u64 sctlr = tsk->thread.sctlr_user;
+	u64 sctlr;
 
 	if (!system_supports_address_auth())
 		return -EINVAL;
@@ -78,12 +78,14 @@ int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
 	if ((keys & ~PR_PAC_ENABLED_KEYS_MASK) || (enabled & ~keys))
 		return -EINVAL;
 
+	preempt_disable();
+	sctlr = tsk->thread.sctlr_user;
 	sctlr &= ~arg_to_enxx_mask(keys);
 	sctlr |= arg_to_enxx_mask(enabled);
+	tsk->thread.sctlr_user = sctlr;
 	if (tsk == current)
-		set_task_sctlr_el1(sctlr);
-	else
-		tsk->thread.sctlr_user = sctlr;
+		update_sctlr_el1(sctlr);
+	preempt_enable();
 
 	return 0;
 }
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index b4bb67f17a2c..c548eec87810 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -527,7 +527,13 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
 	write_sysreg(val, cntkctl_el1);
 }
 
-static void update_sctlr_el1(u64 sctlr)
+/*
+ * __switch_to() checks current->thread.sctlr_user as an optimisation. Therefore
+ * this function must be called with preemption disabled and the update to
+ * sctlr_user must be made in the same preemption disabled block so that
+ * __switch_to() does not see the variable update before the SCTLR_EL1 one.
+ */
+void update_sctlr_el1(u64 sctlr)
 {
 	/*
 	 * EnIA must not be cleared while in the kernel as this is necessary for
@@ -539,19 +545,6 @@ static void update_sctlr_el1(u64 sctlr)
 	isb();
 }
 
-void set_task_sctlr_el1(u64 sctlr)
-{
-	/*
-	 * __switch_to() checks current->thread.sctlr as an
-	 * optimisation. Disable preemption so that it does not see
-	 * the variable update before the SCTLR_EL1 one.
-	 */
-	preempt_disable();
-	current->thread.sctlr_user = sctlr;
-	update_sctlr_el1(sctlr);
-	preempt_enable();
-}
-
 /*
  * Thread switching.
  */
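
Taken together, the calling convention imposed by the new comment on update_sctlr_el1() looks like this (a sketch mirroring the set_mte_ctrl() hunk above; example_sctlr_update() is not a function added by the series):

static void example_sctlr_update(struct task_struct *task, u64 new_sctlr)
{
	preempt_disable();
	/*
	 * The task copy and the register must change within the same
	 * non-preemptible block so that __switch_to() never observes
	 * one update without the other.
	 */
	task->thread.sctlr_user = new_sctlr;
	update_sctlr_el1(new_sctlr);
	preempt_enable();
}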