[XEN,v1,2/4] xen/arm32: head.S: Introduce enable_{boot,secondary}_cpu_mm()

Message ID 20230911135942.791206-3-ayan.kumar.halder@amd.com (mailing list archive)
State Superseded
Series xen/arm: Split MMU code as the preparation of MPU work for Arm32

Commit Message

Ayan Kumar Halder Sept. 11, 2023, 1:59 p.m. UTC
This is based on:
"[PATCH v6 01/13] xen/arm64: head.S: Introduce enable_{boot,secondary}_cpu_mm()"
https://www.mail-archive.com/xen-devel@lists.xenproject.org/msg151918.html

This is being done for Arm32 as MPU support will be added for Arm32 as well.

Signed-off-by: Ayan Kumar Halder <ayan.kumar.halder@amd.com>
---
 xen/arch/arm/arm32/head.S | 90 +++++++++++++++++++++++++++++----------
 1 file changed, 67 insertions(+), 23 deletions(-)

Comments

Julien Grall Oct. 16, 2023, 6:35 p.m. UTC | #1
Hi Ayan,

On 11/09/2023 14:59, Ayan Kumar Halder wrote:
> This is based on:
> "[PATCH v6 01/13] xen/arm64: head.S: Introduce enable_{boot,secondary}_cpu_mm()"
> https://www.mail-archive.com/xen-devel@lists.xenproject.org/msg151918.html

Most of the explanation in that commit message doesn't apply here. You
also move calls like setup_fixmap without explaining why.

Also, if you want to refer to the arm64 patch in the commit message (I
think it would be better after ---), then it is best to point to the
now-committed patch.

>
> This is being done for Arm32 as MPU support will be added for Arm32 as well.
> 
> Signed-off-by: Ayan Kumar Halder <ayan.kumar.halder@amd.com>
> ---
>   xen/arch/arm/arm32/head.S | 90 +++++++++++++++++++++++++++++----------
>   1 file changed, 67 insertions(+), 23 deletions(-)
> 
> diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
> index 057c44a5a2..c0c425eac6 100644
> --- a/xen/arch/arm/arm32/head.S
> +++ b/xen/arch/arm/arm32/head.S
> @@ -201,13 +201,10 @@ past_zImage:
>   
>           bl    check_cpu_mode
>           bl    cpu_init

NIT: I would add a newline to make it clearer.

> -        bl    create_page_tables
> +        ldr   lr, =primary_switched

The existing code was using 'mov_w ...' to avoid load from memory. Can 
you explain why you switched to 'ldr'?
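
For reference, a rough sketch of the difference (assuming mov_w is the
usual Xen arm32 movw/movt macro): the two forms expand roughly to

        /* mov_w: build the address from two immediates, no memory access */
        movw  lr, #:lower16:primary_switched
        movt  lr, #:upper16:primary_switched

        /* ldr =: PC-relative load from an assembler-generated literal pool */
        ldr   lr, [pc, #<literal-pool-offset>]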

> +        b     enable_boot_cpu_mm
>   
> -        /* Address in the runtime mapping to jump to after the MMU is enabled */
> -        mov_w lr, primary_switched
> -        b     enable_mmu
>   primary_switched:
> -        bl    setup_fixmap
>   #ifdef CONFIG_EARLY_PRINTK
>           /* Use a virtual address to access the UART. */
>           mov_w r11, EARLY_UART_VIRTUAL_ADDRESS
> @@ -249,27 +246,11 @@ GLOBAL(init_secondary)
>   #endif
>           bl    check_cpu_mode
>           bl    cpu_init
> -        bl    create_page_tables
>   
> -        /* Address in the runtime mapping to jump to after the MMU is enabled */
>           mov_w lr, secondary_switched
> -        b     enable_mmu
> -secondary_switched:
> -        /*
> -         * Non-boot CPUs need to move on to the proper pagetables, which were
> -         * setup in prepare_secondary_mm.
> -         *
> -         * XXX: This is not compliant with the Arm Arm.
> -         */
> -        mov_w r4, init_ttbr          /* VA of HTTBR value stashed by CPU 0 */
> -        ldrd  r4, r5, [r4]           /* Actual value */
> -        dsb
> -        mcrr  CP64(r4, r5, HTTBR)
> -        dsb
> -        isb
> -        flush_xen_tlb_local r0
> -        pt_enforce_wxn r0
> +        b     enable_secondary_cpu_mm
>   
> +secondary_switched:
>   #ifdef CONFIG_EARLY_PRINTK
>           /* Use a virtual address to access the UART. */
>           mov_w r11, EARLY_UART_VIRTUAL_ADDRESS
> @@ -692,6 +673,69 @@ ready_to_switch:
>           mov   pc, lr
>   ENDPROC(switch_to_runtime_mapping)
>   
> +/*
> + * Enable mm (turn on the data cache and the MMU) for secondary CPUs.
> + * The function will return to the virtual address provided in LR (e.g. the
> + * runtime mapping).
> + *
> + * Inputs:
> + *   lr : Virtual address to return to.
> + *
> + * Clobbers r0 - r6
> + */
> +enable_secondary_cpu_mm:
> +        mov   r6, lr

NIT: I would add a newline to make it clearer.

> +        bl    create_page_tables
> +

I think the comment on top of "mov_w lr, secondary_switched" should be 
moved here.

> +        mov_w lr, secondary_cpu_mmu_on
> +        b     enable_mmu
> +secondary_cpu_mmu_on:

I would prefer if we used 1 to avoid introducing a named local label.
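
I.e. something along these lines (just a sketch):

        mov_w lr, 1f
        b     enable_mmu
1: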

> +        /*
> +         * Non-boot CPUs need to move on to the proper pagetables, which were
> +         * setup in prepare_secondary_mm.
> +         *
> +         * XXX: This is not compliant with the Arm Arm.
> +         */
> +        mov_w r4, init_ttbr          /* VA of HTTBR value stashed by CPU 0 */
> +        ldrd  r4, r5, [r4]           /* Actual value */
> +        dsb
> +        mcrr  CP64(r4, r5, HTTBR)
> +        dsb
> +        isb
> +        flush_xen_tlb_local r0
> +        pt_enforce_wxn r0
> +        mov   lr, r6
> +
> +        /* Return to the virtual address requested by the caller. */
> +        mov   pc, lr

The above two instructions can be combined into:

/* Return to the virtual address requested by the caller (r6). */
mov pc, r6
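
(On arm32 a write to pc is a branch, so this returns directly to the
caller's address stashed in r6 without first restoring lr.)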

> +ENDPROC(enable_secondary_cpu_mm)
> +
> +/*
> + * Enable mm (turn on the data cache and the MMU) for the boot CPU.
> + * The function will return to the virtual address provided in LR (e.g. the
> + * runtime mapping).
> + *
> + * Inputs:
> + *   lr : Virtual address to return to.
> + *
> + * Clobbers r0 - r6
> + */
> +enable_boot_cpu_mm:
> +        mov   r6, lr

NIT: I would add a newline to make it clearer.

> +        bl    create_page_tables
> +
> +        /* Address in the runtime mapping to jump to after the MMU is enabled */
> +        mov_w lr, boot_cpu_mmu_on
> +        b     enable_mmu
> +boot_cpu_mmu_on:

Same as the label secondary_cpu_mmu_on.

> +        bl    setup_fixmap
> +
> +        mov   lr, r6
> +
> +        /* Return to the virtual address requested by the caller. */
> +        mov   pc, lr

In this case the three instructions can be replaced with:

mov lr, r6
b setup_fixmap
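
This turns setup_fixmap into a tail call: with lr already restored to the
caller's address, setup_fixmap's own return goes straight back to the caller.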

> +ENDPROC(enable_boot_cpu_mm)
> +
>   /*
>    * Remove the 1:1 map from the page-tables. It is not easy to keep track
>    * where the 1:1 map was mapped, so we will look for the top-level entry

Cheers,

Patch

diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
index 057c44a5a2..c0c425eac6 100644
--- a/xen/arch/arm/arm32/head.S
+++ b/xen/arch/arm/arm32/head.S
@@ -201,13 +201,10 @@  past_zImage:
 
         bl    check_cpu_mode
         bl    cpu_init
-        bl    create_page_tables
+        ldr   lr, =primary_switched
+        b     enable_boot_cpu_mm
 
-        /* Address in the runtime mapping to jump to after the MMU is enabled */
-        mov_w lr, primary_switched
-        b     enable_mmu
 primary_switched:
-        bl    setup_fixmap
 #ifdef CONFIG_EARLY_PRINTK
         /* Use a virtual address to access the UART. */
         mov_w r11, EARLY_UART_VIRTUAL_ADDRESS
@@ -249,27 +246,11 @@  GLOBAL(init_secondary)
 #endif
         bl    check_cpu_mode
         bl    cpu_init
-        bl    create_page_tables
 
-        /* Address in the runtime mapping to jump to after the MMU is enabled */
         mov_w lr, secondary_switched
-        b     enable_mmu
-secondary_switched:
-        /*
-         * Non-boot CPUs need to move on to the proper pagetables, which were
-         * setup in prepare_secondary_mm.
-         *
-         * XXX: This is not compliant with the Arm Arm.
-         */
-        mov_w r4, init_ttbr          /* VA of HTTBR value stashed by CPU 0 */
-        ldrd  r4, r5, [r4]           /* Actual value */
-        dsb
-        mcrr  CP64(r4, r5, HTTBR)
-        dsb
-        isb
-        flush_xen_tlb_local r0
-        pt_enforce_wxn r0
+        b     enable_secondary_cpu_mm
 
+secondary_switched:
 #ifdef CONFIG_EARLY_PRINTK
         /* Use a virtual address to access the UART. */
         mov_w r11, EARLY_UART_VIRTUAL_ADDRESS
@@ -692,6 +673,69 @@  ready_to_switch:
         mov   pc, lr
 ENDPROC(switch_to_runtime_mapping)
 
+/*
+ * Enable mm (turn on the data cache and the MMU) for secondary CPUs.
+ * The function will return to the virtual address provided in LR (e.g. the
+ * runtime mapping).
+ *
+ * Inputs:
+ *   lr : Virtual address to return to.
+ *
+ * Clobbers r0 - r6
+ */
+enable_secondary_cpu_mm:
+        mov   r6, lr
+        bl    create_page_tables
+
+        mov_w lr, secondary_cpu_mmu_on
+        b     enable_mmu
+secondary_cpu_mmu_on:
+        /*
+         * Non-boot CPUs need to move on to the proper pagetables, which were
+         * setup in prepare_secondary_mm.
+         *
+         * XXX: This is not compliant with the Arm Arm.
+         */
+        mov_w r4, init_ttbr          /* VA of HTTBR value stashed by CPU 0 */
+        ldrd  r4, r5, [r4]           /* Actual value */
+        dsb
+        mcrr  CP64(r4, r5, HTTBR)
+        dsb
+        isb
+        flush_xen_tlb_local r0
+        pt_enforce_wxn r0
+        mov   lr, r6
+
+        /* Return to the virtual address requested by the caller. */
+        mov   pc, lr
+ENDPROC(enable_secondary_cpu_mm)
+
+/*
+ * Enable mm (turn on the data cache and the MMU) for the boot CPU.
+ * The function will return to the virtual address provided in LR (e.g. the
+ * runtime mapping).
+ *
+ * Inputs:
+ *   lr : Virtual address to return to.
+ *
+ * Clobbers r0 - r6
+ */
+enable_boot_cpu_mm:
+        mov   r6, lr
+        bl    create_page_tables
+
+        /* Address in the runtime mapping to jump to after the MMU is enabled */
+        mov_w lr, boot_cpu_mmu_on
+        b     enable_mmu
+boot_cpu_mmu_on:
+        bl    setup_fixmap
+
+        mov   lr, r6
+
+        /* Return to the virtual address requested by the caller. */
+        mov   pc, lr
+ENDPROC(enable_boot_cpu_mm)
+
 /*
  * Remove the 1:1 map from the page-tables. It is not easy to keep track
  * where the 1:1 map was mapped, so we will look for the top-level entry