@@ -201,13 +201,11 @@ past_zImage:
bl check_cpu_mode
bl cpu_init
- bl create_page_tables
- /* Address in the runtime mapping to jump to after the MMU is enabled */
mov_w lr, primary_switched
- b enable_mmu
+ b enable_boot_cpu_mm
+
primary_switched:
- bl setup_fixmap
#ifdef CONFIG_EARLY_PRINTK
/* Use a virtual address to access the UART. */
mov_w r11, EARLY_UART_VIRTUAL_ADDRESS
@@ -249,27 +247,11 @@ GLOBAL(init_secondary)
#endif
bl check_cpu_mode
bl cpu_init
- bl create_page_tables
- /* Address in the runtime mapping to jump to after the MMU is enabled */
mov_w lr, secondary_switched
- b enable_mmu
-secondary_switched:
- /*
- * Non-boot CPUs need to move on to the proper pagetables, which were
- * setup in prepare_secondary_mm.
- *
- * XXX: This is not compliant with the Arm Arm.
- */
- mov_w r4, init_ttbr /* VA of HTTBR value stashed by CPU 0 */
- ldrd r4, r5, [r4] /* Actual value */
- dsb
- mcrr CP64(r4, r5, HTTBR)
- dsb
- isb
- flush_xen_tlb_local r0
- pt_enforce_wxn r0
+ b enable_secondary_cpu_mm
+secondary_switched:
#ifdef CONFIG_EARLY_PRINTK
/* Use a virtual address to access the UART. */
mov_w r11, EARLY_UART_VIRTUAL_ADDRESS
@@ -692,6 +674,82 @@ ready_to_switch:
mov pc, lr
ENDPROC(switch_to_runtime_mapping)
+/*
+ * Enable mm (turn on the data cache and the MMU) for secondary CPUs.
+ * The function will return to the virtual address provided in LR (e.g. the
+ * runtime mapping).
+ *
+ * Inputs:
+ * r9 : paddr(start)
+ * r10: phys offset
+ * lr : Virtual address to return to.
+ *
+ * Output:
+ * r12: Was a temporary mapping created?
+ *
+ * Clobbers r0 - r6
+ */
+enable_secondary_cpu_mm:
+ mov r6, lr
+
+ bl create_page_tables
+
+ /* Address in the runtime mapping to jump to after the MMU is enabled */
+ mov_w lr, 1f
+ b enable_mmu
+1:
+ /*
+	 * Non-boot CPUs need to move on to the proper pagetables, which were
+	 * set up in prepare_secondary_mm.
+ *
+ * XXX: This is not compliant with the Arm Arm.
+ */
+ mov_w r4, init_ttbr /* VA of HTTBR value stashed by CPU 0 */
+ ldrd r4, r5, [r4] /* Actual value */
+ dsb
+ mcrr CP64(r4, r5, HTTBR)
+ dsb
+ isb
+ flush_xen_tlb_local r0
+ pt_enforce_wxn r0
+
+ /* Return to the virtual address requested by the caller. */
+ mov pc, r6
+ENDPROC(enable_secondary_cpu_mm)
+
+/*
+ * Enable mm (turn on the data cache and the MMU) for the boot CPU.
+ * The function will return to the virtual address provided in LR (e.g. the
+ * runtime mapping).
+ *
+ * Inputs:
+ * r9 : paddr(start)
+ * r10: phys offset
+ * lr : Virtual address to return to.
+ *
+ * Output:
+ * r12: Was a temporary mapping created?
+ *
+ * Clobbers r0 - r6
+ */
+enable_boot_cpu_mm:
+ mov r6, lr
+
+ bl create_page_tables
+
+ /* Address in the runtime mapping to jump to after the MMU is enabled */
+ mov_w lr, 1f
+ b enable_mmu
+1:
+ mov lr, r6
+
+ /*
+ * Prepare the fixmap. The function will return to the virtual address
+ * requested by the caller.
+ */
+ b setup_fixmap
+ENDPROC(enable_boot_cpu_mm)
+
/*
* Remove the 1:1 map from the page-tables. It is not easy to keep track
* where the 1:1 map was mapped, so we will look for the top-level entry