[v6,4/6] arm64: head: avoid cache invalidation when entering with the MMU on

Message ID 20221129161418.1968319-5-ardb@kernel.org
State New, archived
Series arm64: Permit EFI boot with MMU and caches on

Commit Message

Ard Biesheuvel Nov. 29, 2022, 4:14 p.m. UTC
If we enter with the MMU on, there is no need for explicit cache
invalidation for stores to memory, as they will be coherent with the
caches.

Let's take advantage of this: create the ID map with the MMU still
enabled if that is how we entered, and skip the cache invalidation
calls in that case.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/kernel/head.S | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
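
For context, the pattern the commit message describes can be sketched as
follows. This is an illustration, not code from the series: the flag-setting
sequence (shown for EL1 only) and the pg_start/pg_end symbols are assumptions,
while dcache_inval_poc is the real arm64 helper that invalidates the range
[x0, x1) to the Point of Coherency.

	mrs	x19, sctlr_el1		// sample the system control register
	and	x19, x19, #1		// SCTLR_ELx.M (bit 0): nonzero if the
					// MMU is on at entry

	// ... populate the ID map page tables using ordinary stores ...

	cbnz	x19, 0f			// entered with the MMU on: the stores
					// above are coherent with the caches,
					// so no maintenance is needed
	dmb	sy			// MMU was off: order the table stores,
	adrp	x0, pg_start		// then invalidate the written range to
	adrp	x1, pg_end		// the PoC, removing any speculatively
	bl	dcache_inval_poc	// loaded stale cache lines
0:

x19 is a natural home for such a flag: it is callee-saved under the AAPCS64
procedure call standard, so it survives the bl calls between the entry point
and create_idmap.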

Patch

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index c3b97f4ae6d769f7..5abf8f9fdd97b673 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -93,9 +93,9 @@ SYM_CODE_START(efi_primary_entry)
 SYM_INNER_LABEL(primary_entry, SYM_L_LOCAL)
 	mov	x19, xzr			// MMU must be off on bare metal boot
 0:	bl	preserve_boot_args
+	bl	create_idmap
 	bl	init_kernel_el			// w0=cpu_boot_mode
 	mov	x20, x0
-	bl	create_idmap
 
 	/*
 	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
@@ -381,12 +381,13 @@ SYM_FUNC_START_LOCAL(create_idmap)
 	 * accesses (MMU disabled), invalidate those tables again to
 	 * remove any speculatively loaded cache lines.
 	 */
+	cbnz	x19, 0f				// skip cache invalidation if MMU is on
 	dmb	sy
 
 	adrp	x0, init_idmap_pg_dir
 	adrp	x1, init_idmap_pg_end
 	bl	dcache_inval_poc
-	ret	x28
+0:	ret	x28
 SYM_FUNC_END(create_idmap)
 
 SYM_FUNC_START_LOCAL(create_kernel_mapping)
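
Two details of the hunks are worth spelling out. The first hunk moves
create_idmap ahead of init_kernel_el, so the ID map is populated while the
CPU is still in the state it was entered in; the likely motivation (my
reading, it is not stated in the commit message) is that init_kernel_el may
change exception level and reinitialise SCTLR with the MMU off, after which
x19 would no longer describe the live MMU state. The resulting entry
sequence, with illustrative comments added:

	SYM_INNER_LABEL(primary_entry, SYM_L_LOCAL)
		mov	x19, xzr	// bare metal boot: MMU must be off
	0:	bl	preserve_boot_args
		bl	create_idmap	// runs in the entry state: with x19
					// nonzero the MMU and caches are still
					// on, so the table stores are coherent
					// and need no invalidation
		bl	init_kernel_el	// w0=cpu_boot_mode; may switch
					// exception level, so it runs only
					// after the ID map has been written
		mov	x20, x0

In the second hunk, the cbnz makes the whole maintenance sequence
conditional: with x19 nonzero, create_idmap branches straight to its return.
The return itself uses ret x28 rather than a plain ret because this code runs
before any stack is set up; the function presumably stashes its link register
in x28 on entry (not visible in this hunk) so that the nested bl to
dcache_inval_poc cannot clobber the return address.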