--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -545,9 +545,17 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
* In future this may be nop'ed out when dealing with 52-bit kernel VAs.
* ttbr: Value of ttbr to set, modified.
*/
- .macro offset_ttbr1, ttbr
+ .macro offset_ttbr1, ttbr, tmp
#ifdef CONFIG_ARM64_USER_VA_BITS_52
orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
+#endif
+
+#ifdef CONFIG_ARM64_VA_BITS_52
+ mrs_s \tmp, SYS_ID_AA64MMFR2_EL1
+ and \tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
+ cbnz \tmp, .Lskipoffs_\@
+ orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
+.Lskipoffs_\@ :
#endif
.endm
@@ -557,7 +565,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
* to be nop'ed out when dealing with 52-bit kernel VAs.
*/
.macro restore_ttbr1, ttbr
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
+#if defined(CONFIG_ARM64_USER_VA_BITS_52) || defined(CONFIG_ARM64_VA_BITS_52)
bic \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
#endif
.endm
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -788,7 +788,7 @@ ENTRY(__enable_mmu)
phys_to_ttbr x1, x1
phys_to_ttbr x2, x2
msr ttbr0_el1, x2 // load TTBR0
- offset_ttbr1 x1
+ offset_ttbr1 x1, x3
msr ttbr1_el1, x1 // load TTBR1
isb
msr sctlr_el1, x0
--- a/arch/arm64/kernel/hibernate-asm.S
+++ b/arch/arm64/kernel/hibernate-asm.S
@@ -33,14 +33,14 @@
* Even switching to our copied tables will cause a changed output address at
* each stage of the walk.
*/
-.macro break_before_make_ttbr_switch zero_page, page_table, tmp
+.macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2
phys_to_ttbr \tmp, \zero_page
msr ttbr1_el1, \tmp
isb
tlbi vmalle1
dsb nsh
phys_to_ttbr \tmp, \page_table
- offset_ttbr1 \tmp
+ offset_ttbr1 \tmp, \tmp2
msr ttbr1_el1, \tmp
isb
.endm
@@ -81,7 +81,7 @@ ENTRY(swsusp_arch_suspend_exit)
* We execute from ttbr0, change ttbr1 to our copied linear map tables
* with a break-before-make via the zero page
*/
- break_before_make_ttbr_switch x5, x0, x6
+ break_before_make_ttbr_switch x5, x0, x6, x8
mov x21, x1
mov x30, x2
@@ -112,7 +112,7 @@ ENTRY(swsusp_arch_suspend_exit)
dsb ish /* wait for PoU cleaning to finish */
/* switch to the restored kernels page tables */
- break_before_make_ttbr_switch x25, x21, x6
+ break_before_make_ttbr_switch x25, x21, x6, x8
ic ialluis
dsb ish
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -179,7 +179,7 @@ ENDPROC(cpu_do_switch_mm)
.macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
adrp \tmp1, empty_zero_page
phys_to_ttbr \tmp2, \tmp1
- offset_ttbr1 \tmp2
+ offset_ttbr1 \tmp2, \tmp1
msr ttbr1_el1, \tmp2
isb
tlbi vmalle1
@@ -198,7 +198,7 @@ ENTRY(idmap_cpu_replace_ttbr1)
__idmap_cpu_set_reserved_ttbr1 x1, x3
- offset_ttbr1 x0
+ offset_ttbr1 x0, x3
msr ttbr1_el1, x0
isb
@@ -373,7 +373,7 @@ __idmap_kpti_secondary:
cbnz w18, 1b
/* All done, act like nothing happened */
- offset_ttbr1 swapper_ttb
+ offset_ttbr1 swapper_ttb, x18
msr ttbr1_el1, swapper_ttb
isb
ret
When running with a 52-bit userspace VA and a 48-bit kernel VA we offset
ttbr1_el1 to allow the kernel pagetables with a 52-bit PTRS_PER_PGD to
be used for both userspace and kernel.

Moving on to a 52-bit kernel VA, we no longer require this offset to
ttbr1_el1 should we be running on a system with HW support for 52-bit
VAs.

This patch introduces conditional logic to offset_ttbr1 to query
SYS_ID_AA64MMFR2_EL1 whenever 52-bit VAs are selected. If there is HW
support for 52-bit VAs then the ttbr1 offset is skipped.

We choose to read a system register rather than vabits_actual because
offset_ttbr1 can be called in places where the kernel data is not
actually mapped.

Calls to offset_ttbr1 appear to be made from rarely called code paths,
so this extra logic is not expected to adversely affect performance.

Signed-off-by: Steve Capper <steve.capper@arm.com>

---

Changed in V3: moved away from the alternative framework, as
offset_ttbr1 can be called in places before the alternative framework
has been initialised.
---
 arch/arm64/include/asm/assembler.h | 12 ++++++++++--
 arch/arm64/kernel/head.S           |  2 +-
 arch/arm64/kernel/hibernate-asm.S  |  8 ++++----
 arch/arm64/mm/proc.S               |  6 +++---
 4 files changed, 18 insertions(+), 10 deletions(-)
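
For reference, the check that the new offset_ttbr1 body performs could
be written in C roughly as below. This is an illustrative sketch, not
part of the patch: the helper names (hw_has_52bit_va,
maybe_offset_ttbr1) are made up for this example, while
ID_AA64MMFR2_LVA_SHIFT, SYS_ID_AA64MMFR2_EL1, read_sysreg_s() and
TTBR1_BADDR_4852_OFFSET are the existing kernel definitions. A non-zero
VARange ("LVA") field in ID_AA64MMFR2_EL1 means the hardware implements
52-bit VAs, so the baddr offset can be skipped.

	/*
	 * Sketch only: C equivalent of the logic added to
	 * offset_ttbr1. ID_AA64MMFR2_EL1.VARange lives at
	 * ID_AA64MMFR2_LVA_SHIFT; non-zero => 52-bit VA support.
	 */
	static inline bool hw_has_52bit_va(void)
	{
		u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);

		return (mmfr2 & (0xfUL << ID_AA64MMFR2_LVA_SHIFT)) != 0;
	}

	/*
	 * Mirrors the cbnz in the macro: with CONFIG_ARM64_VA_BITS_52,
	 * the offset is only applied on hardware limited to 48-bit VAs.
	 */
	static inline u64 maybe_offset_ttbr1(u64 ttbr)
	{
		if (!hw_has_52bit_va())
			ttbr |= TTBR1_BADDR_4852_OFFSET;
		return ttbr;
	}

Note that the real macro cannot simply call C like this: as the commit
message says, offset_ttbr1 runs in contexts where kernel data (such as
vabits_actual) is not yet mapped, which is exactly why it reads the
system register directly in assembly.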