diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -41,7 +41,8 @@
#define ARM64_WORKAROUND_CAVIUM_30115 20
#define ARM64_HAS_DCPOP 21
#define ARM64_SVE 22
+#define ARM64_UNMAP_KERNEL_AT_EL0 23

-#define ARM64_NCAPS 23
+#define ARM64_NCAPS 24

#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -36,7 +36,8 @@ typedef struct {

static inline bool arm64_kernel_unmapped_at_el0(void)
{
- return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0);
+ return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
+ cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}

extern void paging_init(void);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -845,6 +845,40 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus
ID_AA64PFR0_FP_SHIFT) < 0;
}

+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static int __kaiser_forced; /* 0: not forced, >0: forced on, <0: forced off */
+
+static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+ int __unused)
+{
+ /* Forced on command line? */
+ if (__kaiser_forced) {
+ pr_info("KAISER forced %s by command line option\n",
+ __kaiser_forced > 0 ? "ON" : "OFF");
+ return __kaiser_forced > 0;
+ }
+
+ /* Useful for KASLR robustness */
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+ return true;
+
+ return false;
+}
+
+static int __init parse_kaiser(char *str)
+{
+ bool enabled;
+ int ret = strtobool(str, &enabled);
+
+ if (ret)
+ return ret;
+
+ __kaiser_forced = enabled ? 1 : -1;
+ return 0;
+}
+__setup("kaiser=", parse_kaiser);
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -931,6 +965,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.def_scope = SCOPE_SYSTEM,
.matches = hyp_offset_low,
},
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ {
+ .capability = ARM64_UNMAP_KERNEL_AT_EL0,
+ .def_scope = SCOPE_SYSTEM,
+ .matches = unmap_kernel_at_el0,
+ },
+#endif
{
/* FP/SIMD is not implemented */
.capability = ARM64_HAS_NO_FPSIMD,
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -74,6 +74,7 @@
.macro kernel_ventry, el, label, regsize = 64
.align 7
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+alternative_if ARM64_UNMAP_KERNEL_AT_EL0
.if \el == 0
.if \regsize == 64
mrs x30, tpidrro_el0
@@ -82,6 +83,7 @@
mov x30, xzr
.endif
.endif
+alternative_else_nop_endif
#endif

sub sp, sp, #S_FRAME_SIZE
@@ -323,10 +325,10 @@ alternative_else_nop_endif
ldr lr, [sp, #S_LR]
add sp, sp, #S_FRAME_SIZE // restore sp

-#ifndef CONFIG_UNMAP_KERNEL_AT_EL0
- eret
-#else
.if \el == 0
+alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+alternative_if ARM64_UNMAP_KERNEL_AT_EL0
bne 4f
msr far_el1, x30
tramp_alias x30, tramp_exit_native
@@ -334,10 +336,11 @@ alternative_else_nop_endif
4:
tramp_alias x30, tramp_exit_compat
br x30
+alternative_else_nop_endif
+#endif
.else
eret
.endif
-#endif
.endm

.macro irq_stack_entry
Allow explicit disabling of the entry trampoline on the kernel command
line (kaiser=off) by adding a fake CPU feature (ARM64_UNMAP_KERNEL_AT_EL0)
that can be used to toggle the alternative sequences in our entry code and
avoid use of the trampoline altogether if desired. This also allows us to
make use of a static key in arm64_kernel_unmapped_at_el0().

Signed-off-by: Will Deacon <will.deacon@arm.com>
---
 arch/arm64/include/asm/cpucaps.h |  3 ++-
 arch/arm64/include/asm/mmu.h     |  3 ++-
 arch/arm64/kernel/cpufeature.c   | 41 ++++++++++++++++++++++++++++++++++++++++
 arch/arm64/kernel/entry.S        | 11 +++++++----
 4 files changed, 52 insertions(+), 6 deletions(-)
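As a summary of the new runtime policy: an explicit kaiser= option on the
command line always wins, otherwise the trampoline defaults to on when
CONFIG_RANDOMIZE_BASE (KASLR) is enabled and off otherwise. The stand-alone
C mock-up below is purely illustrative; the names forced, randomize_base and
the main() harness are hypothetical stand-ins for __kaiser_forced,
IS_ENABLED(CONFIG_RANDOMIZE_BASE) and the capability framework, not kernel
code:

	#include <stdbool.h>
	#include <stdio.h>

	/* Mirrors __kaiser_forced: 0 = not forced, >0 = on, <0 = off */
	static int forced;
	/* Stands in for IS_ENABLED(CONFIG_RANDOMIZE_BASE) */
	static const bool randomize_base = true;

	static bool unmap_kernel_at_el0(void)
	{
		if (forced)
			return forced > 0;	/* explicit kaiser= setting wins */
		return randomize_base;		/* default tracks KASLR */
	}

	int main(void)
	{
		forced = -1;	/* as if booted with kaiser=off */
		printf("trampoline in use: %s\n",
		       unmap_kernel_at_el0() ? "yes" : "no");
		return 0;
	}

Since parse_kaiser() goes through strtobool(), the usual spellings
(kaiser=0/1, y/n, on/off) should all be accepted for forcing the trampoline
off or on.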