When a CPU is reset it needs to be put into the exception level it had
when it entered the kernel. Update cpu_reset() to accept an argument
el2_switch which signals cpu_reset() to enter the soft reset address at
EL2. If el2_switch is not set the soft reset address will be entered at
EL1.

Update cpu_soft_restart() and soft_restart() to pass the return of
is_hyp_mode_available() as the el2_switch value to cpu_reset(). When
CONFIG_KVM is enabled, soft_restart() passes zero for el2_switch for
now, since KVM cannot yet support CPU restart. Also update the comments
of cpu_reset(), cpu_soft_restart() and soft_restart() to reflect this
change.

Signed-off-by: Geoff Levand <geoff@infradead.org>
---
 arch/arm64/include/asm/proc-fns.h |  4 ++--
 arch/arm64/kernel/process.c       | 10 +++++++++-
 arch/arm64/mm/proc.S              | 46 ++++++++++++++++++++++++++++++++++------------
 3 files changed, 45 insertions(+), 15 deletions(-)

--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -32,8 +32,8 @@ extern void cpu_cache_off(void);
extern void cpu_do_idle(void);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
-void cpu_soft_restart(phys_addr_t cpu_reset,
- unsigned long addr) __attribute__((noreturn));
+void cpu_soft_restart(phys_addr_t cpu_reset, unsigned long el2_switch,
+ unsigned long addr) __attribute__((noreturn));
extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -50,6 +50,7 @@
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/stacktrace.h>
+#include <asm/virt.h>
#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
@@ -60,7 +61,14 @@ EXPORT_SYMBOL(__stack_chk_guard);
void soft_restart(unsigned long addr)
{
setup_mm_for_reboot();
- cpu_soft_restart(virt_to_phys(cpu_reset), addr);
+
+ /* TODO: Remove this conditional when KVM can support CPU restart. */
+ if (IS_ENABLED(CONFIG_KVM))
+ cpu_soft_restart(virt_to_phys(cpu_reset), 0, addr);
+ else
+ cpu_soft_restart(virt_to_phys(cpu_reset),
+ is_hyp_mode_available(), addr);
+
/* Should never get here */
BUG();
}
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -25,6 +25,7 @@
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
+#include <asm/virt.h>
#include "proc-macros.S"
@@ -59,27 +60,47 @@ ENTRY(cpu_cache_off)
ENDPROC(cpu_cache_off)
/*
- * cpu_reset(loc)
+ * cpu_reset(el2_switch, addr) - Helper for cpu_soft_restart.
*
- * Perform a soft reset of the system. Put the CPU into the same state
- * as it would be if it had been reset, and branch to what would be the
- * reset vector. It must be executed with the flat identity mapping.
+ * @el2_switch: Flag to indicate a switch to EL2 is needed.
+ * @addr: Location to jump to for soft reset.
*
- * - loc - location to jump to for soft reset
+ * Put the CPU into the same state as it would be if it had been reset, and
+ * branch to what would be the reset vector. It must be executed with the
+ * flat identity mapping.
*/
+
.align 5
+
ENTRY(cpu_reset)
- mrs x1, sctlr_el1
- bic x1, x1, #1
- msr sctlr_el1, x1 // disable the MMU
+ mrs x2, sctlr_el1
+ bic x2, x2, #1
+ msr sctlr_el1, x2 // disable the MMU
isb
- ret x0
+
+ cbz x0, 1f // el2_switch?
+ mov x0, x1
+ mov x1, xzr
+ mov x2, xzr
+ mov x3, xzr
+ hvc #HVC_CALL_FUNC // no return
+
+1: ret x1
ENDPROC(cpu_reset)
+/*
+ * cpu_soft_restart(cpu_reset, el2_switch, addr) - Perform a cpu soft reset.
+ *
+ * @cpu_reset: Physical address of the cpu_reset routine.
+ * @el2_switch: Flag to indicate a switch to EL2 is needed, passed to cpu_reset.
+ * @addr: Location to jump to for soft reset, passed to cpu_reset.
+ *
+ */
+
ENTRY(cpu_soft_restart)
- /* Save address of cpu_reset() and reset address */
- mov x19, x0
- mov x20, x1
+ mov x19, x0 // cpu_reset
+ mov x20, x1 // el2_switch
+ mov x21, x2 // addr
/* Turn D-cache off */
bl cpu_cache_off
@@ -88,6 +109,7 @@ ENTRY(cpu_soft_restart)
bl flush_cache_all
mov x0, x20
+ mov x1, x21 // addr
ret x19
ENDPROC(cpu_soft_restart)
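
A note for reviewers on the hvc in cpu_reset(): as the register shuffle
above suggests, the HVC_CALL_FUNC hcall (presumably introduced by an
earlier patch in this series) takes the address of a function to run at
EL2 in x0 and its arguments in x1-x3, which is why cpu_reset() moves
the reset address from x1 into x0 and zeroes the unused argument
registers before the call. The sketch below shows what an EL2
sync-exception fragment servicing such a call could look like; the
labels, the use of x18 for dispatch, and the omission of the EC check
are illustrative assumptions, not the series' actual handler.

	/* Sketch only: hypothetical, simplified EL2 handler fragment. */
	el1_sync_sketch:
		mrs	x18, esr_el2		// read the exception syndrome
		and	x18, x18, #0xffff	// hvc immediate lives in ISS[15:0]
		cmp	x18, #HVC_CALL_FUNC
		b.ne	unhandled_hcall		// hypothetical fall-through label
		mov	x29, lr			// preserve EL2's link register
		mov	lr, x0			// x0: function to run at EL2
		mov	x0, x1			// shift the arguments down into
		mov	x1, x2			// the standard argument registers
		mov	x2, x3
		blr	lr			// run it; cpu_reset's case never returns
		mov	lr, x29
		eret				// otherwise, back to the EL1 caller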
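Also for reference, the el2_switch decision in soft_restart() rests on
is_hyp_mode_available() from the newly included asm/virt.h. That helper
is not part of this patch; at the time of this series it is implemented
roughly as paraphrased below, reporting whether every CPU entered the
kernel at EL2:

	/* Paraphrased from arch/arm64/include/asm/virt.h; not this patch. */
	static inline bool is_hyp_mode_available(void)
	{
		/*
		 * __boot_cpu_mode[] records the exception level the boot
		 * CPU and the secondaries entered the kernel at. Only if
		 * all of them came in at EL2 is it safe to hand the soft
		 * reset address back to EL2.
		 */
		return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
			__boot_cpu_mode[1] == BOOT_CPU_MODE_EL2);
	}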