
[v2,2/2] arm64: Stash shadow stack pointer in the task struct on interrupt

Message ID: 20230109174800.3286265-3-ardb@kernel.org
State: New, archived
Series: arm64: harden shadow call stack pointer handling

Commit Message

Ard Biesheuvel Jan. 9, 2023, 5:48 p.m. UTC
Instead of reloading the shadow call stack pointer from the ordinary
stack, which may be vulnerable to the kind of gadget-based attacks
shadow call stacks were designed to prevent, let's store a task's shadow
call stack pointer in the task struct when switching to the shadow IRQ
stack.

Given that the task_struct::scs_sp field is currently only used to
preserve the shadow call stack pointer while a task is scheduled out or
running in user space, reusing this field to preserve and restore it
while running off the IRQ stack must be safe, as those occurrences are
guaranteed never to overlap. (The stack switching logic only switches
stacks when running from the task stack, so the value being saved here
always corresponds to the task-mode shadow stack.)

While at it, fold a mov/add/mov sequence into a single add.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/kernel/entry.S | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)
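
For reference, the scs_save and scs_load_current helpers used in the
diff live in arch/arm64/include/asm/scs.h (scs_load_current is
introduced by patch 1/2 of this series). Paraphrased rather than quoted
verbatim -- the TSK_TI_SCS_SP offset comes from asm-offsets, scs_sp is
the x18 register alias, and get_current_task reads 'current' from
SP_EL0 -- they amount to:

	.macro scs_save tsk
	/* stash the shadow call stack pointer in the task struct */
	str	scs_sp, [\tsk, #TSK_TI_SCS_SP]
	.endm

	.macro scs_load_current
	/* reload x18 directly from current's task struct */
	get_current_task scs_sp
	ldr	scs_sp, [scs_sp, #TSK_TI_SCS_SP]
	.endm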

Comments

Mark Rutland Jan. 10, 2023, 2:57 p.m. UTC | #1
On Mon, Jan 09, 2023 at 06:48:00PM +0100, Ard Biesheuvel wrote:
> [...]
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>

Acked-by: Mark Rutland <mark.rutland@arm.com>

Mark.

Kees Cook Jan. 12, 2023, 10:18 p.m. UTC | #2
On Mon, Jan 09, 2023 at 06:48:00PM +0100, Ard Biesheuvel wrote:
> [...]
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>

Reviewed-by: Kees Cook <keescook@chromium.org>

Patch

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 546f7773238ea45d..80d763e165fc5856 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -876,19 +876,19 @@ NOKPROBE(ret_from_fork)
  */
 SYM_FUNC_START(call_on_irq_stack)
 #ifdef CONFIG_SHADOW_CALL_STACK
-	stp	scs_sp, xzr, [sp, #-16]!
+	get_current_task x16
+	scs_save x16
 	ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17
 #endif
+
 	/* Create a frame record to save our LR and SP (implicit in FP) */
 	stp	x29, x30, [sp, #-16]!
 	mov	x29, sp
 
 	ldr_this_cpu x16, irq_stack_ptr, x17
-	mov	x15, #IRQ_STACK_SIZE
-	add	x16, x16, x15
 
 	/* Move to the new stack and call the function there */
-	mov	sp, x16
+	add	sp, x16, #IRQ_STACK_SIZE
 	blr	x1
 
 	/*
@@ -897,9 +897,7 @@ SYM_FUNC_START(call_on_irq_stack)
 	 */
 	mov	sp, x29
 	ldp	x29, x30, [sp], #16
-#ifdef CONFIG_SHADOW_CALL_STACK
-	ldp	scs_sp, xzr, [sp], #16
-#endif
+	scs_load_current
 	ret
 SYM_FUNC_END(call_on_irq_stack)
 NOKPROBE(call_on_irq_stack)
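
For context on the storage being reused: on arm64, thread_info is
embedded at the start of task_struct (THREAD_INFO_IN_TASK), which is
why the commit message can refer to the scs_sp field via task_struct
and why scs_save can take the raw task pointer from get_current_task.
Abridged sketches of the relevant declarations (paraphrased from the
kernel sources around this series, not verbatim):

	/* arch/arm64/include/asm/thread_info.h */
	struct thread_info {
		/* ... */
	#ifdef CONFIG_SHADOW_CALL_STACK
		void	*scs_base;
		void	*scs_sp;	/* saved/restored by scs_save/scs_load_current */
	#endif
		/* ... */
	};

	/* arch/arm64/kernel/irq.c */
	#ifdef CONFIG_SHADOW_CALL_STACK
	DEFINE_PER_CPU(unsigned long *, irq_shadow_call_stack_ptr);
	#endif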