diff mbox series

[BOOT-WRAPPER,v3,06/10] aarch32: Always enter kernel via exception return

Message ID 20240822101441.251184-7-mark.rutland@arm.com (mailing list archive)
State New, archived
Headers show
Series Cleanup initialization | expand

Commit Message

Mark Rutland Aug. 22, 2024, 10:14 a.m. UTC
When the boot-wrapper is entered at Secure PL1 it will enter the kernel
via an exception return, and when entered at Hyp it will branch to the
kernel directly. This is an artifact of the way the boot-wrapper was
originally written in assembly, and it would be preferable to always
enter the kernel via an exception return so that PSTATE is always
initialized to a known-good value.

Rework jump_kernel() to always enter the kernel via an exception return,
matching the style of the AArch64 version of jump_kernel().

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Cc: Akos Denke <akos.denke@arm.com>
Cc: Andre Przywara <andre.przywara@arm.com>
Cc: Luca Fancellu <luca.fancellu@arm.com>
---
 arch/aarch32/boot.S | 48 +++++++++++++++++++++++----------------------
 1 file changed, 25 insertions(+), 23 deletions(-)

Comments

Andre Przywara Aug. 22, 2024, 4:04 p.m. UTC | #1
On Thu, 22 Aug 2024 11:14:37 +0100
Mark Rutland <mark.rutland@arm.com> wrote:

> When the boot-wrapper is entered at Secure PL1 it will enter the kernel
> via an exception return, and when entered at Hyp it will branch to the
> kernel directly. This is an artifact of the way the boot-wrapper was
> originally written in assembly, and it would be preferable to always
> enter the kernel via an exception return so that PSTATE is always
> initialized to a known-good value.
> 
> Rework jump_kernel() to always enter the kernel via an exception return,
> matching the style of the AArch64 version of jump_kernel()
> 
> Signed-off-by: Mark Rutland <mark.rutland@arm.com>
> Acked-by: Marc Zyngier <maz@kernel.org>
> Cc: Akos Denke <akos.denke@arm.com>
> Cc: Andre Przywara <andre.przywara@arm.com>
> Cc: Luca Fancellu <luca.fancellu@arm.com>

Thanks for the changes, looks good to me now!

Reviewed-by: Andre Przywara <andre.przywara@arm.com>

Cheers,
Andre

> ---
>  arch/aarch32/boot.S | 48 +++++++++++++++++++++++----------------------
>  1 file changed, 25 insertions(+), 23 deletions(-)
> 
> diff --git a/arch/aarch32/boot.S b/arch/aarch32/boot.S
> index f21f89a..e79aa06 100644
> --- a/arch/aarch32/boot.S
> +++ b/arch/aarch32/boot.S
> @@ -76,10 +76,6 @@ reset_at_hyp:
>  
>  	bl	setup_stack
>  
> -	mov	r0, #1
> -	ldr	r1, =flag_no_el3
> -	str	r0, [r1]
> -
>  	bl	cpu_init_bootwrapper
>  
>  	bl	cpu_init_arch
> @@ -96,9 +92,10 @@ err_invalid_id:
>  	 * r1-r3, sp[0]: kernel arguments
>  	 */
>  ASM_FUNC(jump_kernel)
> -	sub	sp, #4				@ Ignore fourth argument
> -	push	{r0 - r3}
> -	mov	r5, sp
> +	mov	r4, r0
> +	mov	r5, r1
> +	mov	r6, r2
> +	mov	r7, r3
>  
>  	ldr	r0, =HSCTLR_KERNEL
>  	mcr	p15, 4, r0, c1, c0, 0		@ HSCTLR
> @@ -111,23 +108,28 @@ ASM_FUNC(jump_kernel)
>  	bl	find_logical_id
>  	bl	setup_stack
>  
> -	ldr	lr, [r5], #4
> -	ldm	r5, {r0 - r2}
> -
> -	ldr	r4, =flag_no_el3
> -	ldr	r4, [r4]
> -	cmp	r4, #1
> -	bxeq	lr				@ no EL3
> +	mov	r0, r5
> +	mov	r1, r6
> +	mov	r2, r7
> +	ldr	r3, =SPSR_KERNEL
>  
> -	ldr	r4, =SPSR_KERNEL
>  	/* Return in thumb2 mode when bit 0 of address is 1 */
> -	tst	lr, #1
> -	orrne	r4, #PSR_T
> +	tst	r4, #1
> +	orrne	r3, #PSR_T
> +
> +	mrs	r5, cpsr
> +	and	r5, #PSR_MODE_MASK
> +	cmp	r5, #PSR_MON
> +	beq	eret_at_mon
> +	cmp	r5, #PSR_HYP
> +	beq	eret_at_hyp
> +	b	.
>  
> -	msr	spsr_cxf, r4
> +eret_at_mon:
> +	mov	lr, r4
> +	msr	spsr_mon, r3
>  	movs	pc, lr
> -
> -	.section .data
> -	.align 2
> -flag_no_el3:
> -	.long 0
> +eret_at_hyp:
> +	msr	elr_hyp, r4
> +	msr	spsr_hyp, r3
> +	eret
diff mbox series

Patch

diff --git a/arch/aarch32/boot.S b/arch/aarch32/boot.S
index f21f89a..e79aa06 100644
--- a/arch/aarch32/boot.S
+++ b/arch/aarch32/boot.S
@@ -76,10 +76,6 @@  reset_at_hyp:
 
 	bl	setup_stack
 
-	mov	r0, #1
-	ldr	r1, =flag_no_el3
-	str	r0, [r1]
-
 	bl	cpu_init_bootwrapper
 
 	bl	cpu_init_arch
@@ -96,9 +92,10 @@  err_invalid_id:
 	 * r1-r3, sp[0]: kernel arguments
 	 */
 ASM_FUNC(jump_kernel)
-	sub	sp, #4				@ Ignore fourth argument
-	push	{r0 - r3}
-	mov	r5, sp
+	mov	r4, r0
+	mov	r5, r1
+	mov	r6, r2
+	mov	r7, r3
 
 	ldr	r0, =HSCTLR_KERNEL
 	mcr	p15, 4, r0, c1, c0, 0		@ HSCTLR
@@ -111,23 +108,28 @@  ASM_FUNC(jump_kernel)
 	bl	find_logical_id
 	bl	setup_stack
 
-	ldr	lr, [r5], #4
-	ldm	r5, {r0 - r2}
-
-	ldr	r4, =flag_no_el3
-	ldr	r4, [r4]
-	cmp	r4, #1
-	bxeq	lr				@ no EL3
+	mov	r0, r5
+	mov	r1, r6
+	mov	r2, r7
+	ldr	r3, =SPSR_KERNEL
 
-	ldr	r4, =SPSR_KERNEL
 	/* Return in thumb2 mode when bit 0 of address is 1 */
-	tst	lr, #1
-	orrne	r4, #PSR_T
+	tst	r4, #1
+	orrne	r3, #PSR_T
+
+	mrs	r5, cpsr
+	and	r5, #PSR_MODE_MASK
+	cmp	r5, #PSR_MON
+	beq	eret_at_mon
+	cmp	r5, #PSR_HYP
+	beq	eret_at_hyp
+	b	.
 
-	msr	spsr_cxf, r4
+eret_at_mon:
+	mov	lr, r4
+	msr	spsr_mon, r3
 	movs	pc, lr
-
-	.section .data
-	.align 2
-flag_no_el3:
-	.long 0
+eret_at_hyp:
+	msr	elr_hyp, r4
+	msr	spsr_hyp, r3
+	eret