
[4/8] arm64: Add EL2 switch to soft_restart

Message ID: bb44d4eee4fab15278e4a8829a0981878bc9acd7.1421449714.git.geoff@infradead.org
State: New, archived

Commit Message

Geoff Levand Jan. 17, 2015, 12:23 a.m. UTC
When a CPU is reset, it needs to be put into the exception level it had when it
entered the kernel.  Update cpu_reset() to accept an argument, el2_switch, which
tells cpu_reset() to enter the soft reset address at EL2.  If el2_switch is not
set, the soft reset address will be entered at EL1.

Update cpu_soft_restart() and soft_restart() to pass the return value of
is_hyp_mode_available() as the el2_switch value to cpu_reset().  Also update the
comments of cpu_reset(), cpu_soft_restart(), and soft_restart() to reflect this
change.

Signed-off-by: Geoff Levand <geoff@infradead.org>
---
 arch/arm64/include/asm/proc-fns.h |  4 ++--
 arch/arm64/kernel/process.c       | 10 ++++++++-
 arch/arm64/mm/proc.S              | 47 +++++++++++++++++++++++++++++----------
 3 files changed, 46 insertions(+), 15 deletions(-)

Comments

Mark Rutland Jan. 26, 2015, 7:02 p.m. UTC | #1
On Sat, Jan 17, 2015 at 12:23:34AM +0000, Geoff Levand wrote:
> When a CPU is reset, it needs to be put into the exception level it had when it
> entered the kernel.  Update cpu_reset() to accept an argument, el2_switch, which
> tells cpu_reset() to enter the soft reset address at EL2.  If el2_switch is not
> set, the soft reset address will be entered at EL1.
> 
> Update cpu_soft_restart() and soft_restart() to pass the return value of
> is_hyp_mode_available() as the el2_switch value to cpu_reset().  Also update the
> comments of cpu_reset(), cpu_soft_restart(), and soft_restart() to reflect this
> change.
> 
> Signed-off-by: Geoff Levand <geoff@infradead.org>
> ---
>  arch/arm64/include/asm/proc-fns.h |  4 ++--
>  arch/arm64/kernel/process.c       | 10 ++++++++-
>  arch/arm64/mm/proc.S              | 47 +++++++++++++++++++++++++++++----------
>  3 files changed, 46 insertions(+), 15 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
> index 9a8fd84..339394d 100644
> --- a/arch/arm64/include/asm/proc-fns.h
> +++ b/arch/arm64/include/asm/proc-fns.h
> @@ -32,8 +32,8 @@ extern void cpu_cache_off(void);
>  extern void cpu_do_idle(void);
>  extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
>  extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
> -void cpu_soft_restart(phys_addr_t cpu_reset,
> -		unsigned long addr) __attribute__((noreturn));
> +void cpu_soft_restart(phys_addr_t cpu_reset, unsigned long el2_switch,
> +		      unsigned long addr) __attribute__((noreturn));
>  extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
>  extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
>  
> diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
> index fde9923..371bbf1 100644
> --- a/arch/arm64/kernel/process.c
> +++ b/arch/arm64/kernel/process.c
> @@ -50,6 +50,7 @@
>  #include <asm/mmu_context.h>
>  #include <asm/processor.h>
>  #include <asm/stacktrace.h>
> +#include <asm/virt.h>
>  
>  #ifdef CONFIG_CC_STACKPROTECTOR
>  #include <linux/stackprotector.h>
> @@ -60,7 +61,14 @@ EXPORT_SYMBOL(__stack_chk_guard);
>  void soft_restart(unsigned long addr)
>  {
>  	setup_mm_for_reboot();
> -	cpu_soft_restart(virt_to_phys(cpu_reset), addr);
> +
> +	/* TODO: Remove this conditional when KVM can support CPU restart. */
> +	if (IS_ENABLED(CONFIG_KVM))
> +		cpu_soft_restart(virt_to_phys(cpu_reset), 0, addr);

If we haven't torn down KVM, doesn't that mean that KVM is active at EL2
(with MMU and caches on) at this point?

If that's the case then we cannot possibly try to call kexec(), because
we cannot touch the memory used by the page tables for those EL2
mappings. Things will explode if we do.

Mark.
Geoff Levand Jan. 26, 2015, 9:48 p.m. UTC | #2
Hi Mark,

On Mon, 2015-01-26 at 19:02 +0000, Mark Rutland wrote:
> On Sat, Jan 17, 2015 at 12:23:34AM +0000, Geoff Levand wrote:
> > [...]
> > diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
> > [...]
> > @@ -60,7 +61,14 @@ EXPORT_SYMBOL(__stack_chk_guard);
> >  void soft_restart(unsigned long addr)
> >  {
> >  	setup_mm_for_reboot();
> > -	cpu_soft_restart(virt_to_phys(cpu_reset), addr);
> > +
> > +	/* TODO: Remove this conditional when KVM can support CPU restart. */
> > +	if (IS_ENABLED(CONFIG_KVM))
> > +		cpu_soft_restart(virt_to_phys(cpu_reset), 0, addr);
> 
> If we haven't torn down KVM, doesn't that mean that KVM is active at EL2
> (with MMU and caches on) at this point?
> 
> If that's the case then we cannot possibly try to call kexec(), because
> we cannot touch the memory used by the page tables for those EL2
> mappings. Things will explode if we do.

This conditional just means: if KVM is configured, do things the old way (don't
try to switch exception levels).  It is there to handle the system shutdown
case.

Another patch in this series, '[PATCH 7/8] arm64/kexec: Add checks for
KVM', ensures kexec cannot happen when KVM is configured.

-Geoff
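
A rough illustration of the kind of guard Geoff describes (patch 7/8 itself is
not reproduced in this thread, so this sketch is hypothetical and the actual
check may differ):

/*
 * Hypothetical sketch only; the real check is in
 * '[PATCH 7/8] arm64/kexec: Add checks for KVM'.
 */
#include <linux/errno.h>
#include <linux/kconfig.h>
#include <linux/kexec.h>
#include <asm/virt.h>

int machine_kexec_prepare(struct kimage *image)
{
	/* With KVM built in, EL2 may still be in use: refuse to load. */
	if (IS_ENABLED(CONFIG_KVM) && is_hyp_mode_available())
		return -EBUSY;

	return 0;
}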
Mark Rutland Jan. 27, 2015, 4:46 p.m. UTC | #3
On Mon, Jan 26, 2015 at 09:48:48PM +0000, Geoff Levand wrote:
> Hi Mark,
> 
> On Mon, 2015-01-26 at 19:02 +0000, Mark Rutland wrote:
> > On Sat, Jan 17, 2015 at 12:23:34AM +0000, Geoff Levand wrote:
> > > [...]
> > > @@ -60,7 +61,14 @@ EXPORT_SYMBOL(__stack_chk_guard);
> > >  void soft_restart(unsigned long addr)
> > >  {
> > >  	setup_mm_for_reboot();
> > > -	cpu_soft_restart(virt_to_phys(cpu_reset), addr);
> > > +
> > > +	/* TODO: Remove this conditional when KVM can support CPU restart. */
> > > +	if (IS_ENABLED(CONFIG_KVM))
> > > +		cpu_soft_restart(virt_to_phys(cpu_reset), 0, addr);
> > 
> > If we haven't torn down KVM, doesn't that mean that KVM is active at EL2
> > (with MMU and caches on) at this point?
> > 
> > If that's the case then we cannot possibly try to call kexec(), because
> > we cannot touch the memory used by the page tables for those EL2
> > mappings. Things will explode if we do.
> 
> This conditional just means: if KVM is configured, do things the old way (don't
> try to switch exception levels).  It is there to handle the system shutdown
> case.

Having grepped treewide for soft_restart, other than kexec there are no
users for arm64. So surely kexec is the only case to cater for at the
moment?

> Another patch in this series, '[PATCH 7/8] arm64/kexec: Add checks for
> KVM', ensures kexec cannot happen when KVM is configured.

It would be better to just move this earlier (or even better, implement
KVM teardown).

Mark.
Catalin Marinas Jan. 27, 2015, 5:57 p.m. UTC | #4
On Sat, Jan 17, 2015 at 12:23:34AM +0000, Geoff Levand wrote:
>  ENTRY(cpu_reset)
> -	mrs	x1, sctlr_el1
> -	bic	x1, x1, #1
> -	msr	sctlr_el1, x1			// disable the MMU
> +	mrs	x2, sctlr_el1
> +	bic	x2, x2, #1
> +	msr	sctlr_el1, x2			// disable the MMU
>  	isb
> -	ret	x0
> +
> +	cbz	x0, 1f				// el2_switch?
> +	mov	x0, x1
> +	mov	x1, xzr
> +	mov	x2, xzr
> +	mov	x3, xzr
> +	hvc	#HVC_CALL_FUNC			// no return

If that's the only user of HVC_CALL_FUNC, why do we bother with arguments
and a calling convention?
Geoff Levand Jan. 27, 2015, 6:34 p.m. UTC | #5
Hi Mark,

On Tue, 2015-01-27 at 16:46 +0000, Mark Rutland wrote:
> On Mon, Jan 26, 2015 at 09:48:48PM +0000, Geoff Levand wrote:
> > This conditional just means: if KVM is configured, do things the old way (don't
> > try to switch exception levels).  It is there to handle the system shutdown
> > case.
> 
> Having grepped treewide for soft_restart, other than kexec there are no
> users for arm64. So surely kexec is the only case to cater for at the
> moment?

Yes, I think you're right, and so it seems we can drop this patch and
just have the 'Add checks for KVM' patch.

> > Another patch in this series, '[PATCH 7/8] arm64/kexec: Add checks for
> > KVM', ensures kexec cannot happen when KVM is configured.
> 
> It would be better to just move this earlier (or even better, implement
> KVM teardown).

Yes, I hope we don't really need to have any KVM work-arounds.

-Geoff
Geoff Levand Jan. 30, 2015, 9:47 p.m. UTC | #6
Hi Catalin,

On Tue, 2015-01-27 at 17:57 +0000, Catalin Marinas wrote:
> On Sat, Jan 17, 2015 at 12:23:34AM +0000, Geoff Levand wrote:
> >  ENTRY(cpu_reset)
> > -	mrs	x1, sctlr_el1
> > -	bic	x1, x1, #1
> > -	msr	sctlr_el1, x1			// disable the MMU
> > +	mrs	x2, sctlr_el1
> > +	bic	x2, x2, #1
> > +	msr	sctlr_el1, x2			// disable the MMU
> >  	isb
> > -	ret	x0
> > +
> > +	cbz	x0, 1f				// el2_switch?
> > +	mov	x0, x1
> > +	mov	x1, xzr
> > +	mov	x2, xzr
> > +	mov	x3, xzr
> > +	hvc	#HVC_CALL_FUNC			// no return
> 
> If that's the only user of HVC_CALL_FUNC, why do we bother with arguments
> and a calling convention?

It was intended that HVC_CALL_FUNC be a mechanism to call a generic
function in hyp mode.  cpu_reset() is the only user of it now.

As Mark explained in another post, the use of x18 by the hyp stub
is to avoid the complication of setting up a stack for EL2.  We
thought this solution was acceptable since there are relatively
few HVC_CALL_FUNC calls and we could ensure they would do the
right thing.

-Geoff
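
A minimal caller-side sketch of the convention described above: x0 carries the
address of the function to run at EL2 and x1-x3 carry its arguments.  The hvc
immediate below is a placeholder, since the actual HVC_CALL_FUNC value and the
hyp stub handler are defined elsewhere in this series:

/* Hypothetical caller-side sketch of the HVC_CALL_FUNC convention. */
static void hyp_call_func(unsigned long fn, unsigned long arg0,
			  unsigned long arg1, unsigned long arg2)
{
	register unsigned long x0 asm("x0") = fn;   /* function to run at EL2 */
	register unsigned long x1 asm("x1") = arg0;
	register unsigned long x2 asm("x2") = arg1;
	register unsigned long x3 asm("x3") = arg2;

	/*
	 * "hvc #0" is a placeholder immediate, not the real HVC_CALL_FUNC.
	 * x18 is listed as clobbered because the stub may use it in place
	 * of an EL2 stack, as discussed above.
	 */
	asm volatile("hvc #0"
		     : "+r" (x0), "+r" (x1), "+r" (x2), "+r" (x3)
		     :
		     : "x18", "memory");
}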

Patch

diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 9a8fd84..339394d 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -32,8 +32,8 @@  extern void cpu_cache_off(void);
 extern void cpu_do_idle(void);
 extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
 extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
-void cpu_soft_restart(phys_addr_t cpu_reset,
-		unsigned long addr) __attribute__((noreturn));
+void cpu_soft_restart(phys_addr_t cpu_reset, unsigned long el2_switch,
+		      unsigned long addr) __attribute__((noreturn));
 extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
 extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
 
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index fde9923..371bbf1 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -50,6 +50,7 @@ 
 #include <asm/mmu_context.h>
 #include <asm/processor.h>
 #include <asm/stacktrace.h>
+#include <asm/virt.h>
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 #include <linux/stackprotector.h>
@@ -60,7 +61,14 @@  EXPORT_SYMBOL(__stack_chk_guard);
 void soft_restart(unsigned long addr)
 {
 	setup_mm_for_reboot();
-	cpu_soft_restart(virt_to_phys(cpu_reset), addr);
+
+	/* TODO: Remove this conditional when KVM can support CPU restart. */
+	if (IS_ENABLED(CONFIG_KVM))
+		cpu_soft_restart(virt_to_phys(cpu_reset), 0, addr);
+	else
+		cpu_soft_restart(virt_to_phys(cpu_reset),
+				 is_hyp_mode_available(), addr);
+
 	/* Should never get here */
 	BUG();
 }
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index c507e25..b767032 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -26,6 +26,7 @@ 
 #include <asm/pgtable-hwdef.h>
 #include <asm/pgtable.h>
 #include <asm/proc-macros.S>
+#include <asm/virt.h>
 
 #ifdef CONFIG_ARM64_64K_PAGES
 #define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
@@ -58,27 +59,48 @@  ENTRY(cpu_cache_off)
 ENDPROC(cpu_cache_off)
 
 /*
- *	cpu_reset(loc)
+ * cpu_reset(el2_switch, addr) - Helper for cpu_soft_restart.
  *
- *	Perform a soft reset of the system.  Put the CPU into the same state
- *	as it would be if it had been reset, and branch to what would be the
- *	reset vector. It must be executed with the flat identity mapping.
+ * @el2_switch: Flag to indicate a switch to EL2 is needed.
+ * @addr: Location to jump to for soft reset.
+ * (el2_switch arrives in x0, addr in x1.)
  *
- *	- loc   - location to jump to for soft reset
+ * Put the CPU into the same state as it would be if it had been reset, and
+ * branch to what would be the reset vector. It must be executed with the
+ * flat identity mapping.
  */
+
 	.align	5
+
 ENTRY(cpu_reset)
-	mrs	x1, sctlr_el1
-	bic	x1, x1, #1
-	msr	sctlr_el1, x1			// disable the MMU
+	mrs	x2, sctlr_el1
+	bic	x2, x2, #1
+	msr	sctlr_el1, x2			// disable the MMU
 	isb
-	ret	x0
+
+	cbz	x0, 1f				// el2_switch?
+	mov	x0, x1
+	mov	x1, xzr
+	mov	x2, xzr
+	mov	x3, xzr
+	hvc	#HVC_CALL_FUNC			// no return
+
+1:	ret	x1
 ENDPROC(cpu_reset)
 
+/*
+ * cpu_soft_restart(cpu_reset, el2_switch, addr) - Perform a cpu soft reset.
+ *
+ * @cpu_reset: Physical address of the cpu_reset routine.
+ * @el2_switch: Flag to indicate a switch to EL2 is needed, passed to cpu_reset.
+ * @addr: Location to jump to for soft reset, passed to cpu_reset.
+ *
+ */
+
 ENTRY(cpu_soft_restart)
-	/* Save address of cpu_reset() and reset address */
-	mov	x19, x0
-	mov	x20, x1
+	mov	x19, x0				// cpu_reset
+	mov	x20, x1				// el2_switch
+	mov	x21, x2				// addr
 
 	/* Turn D-cache off */
 	bl	cpu_cache_off
@@ -87,6 +109,7 @@  ENTRY(cpu_soft_restart)
 	bl	flush_cache_all
 
 	mov	x0, x20
+	mov	x1, x21
 	ret	x19
 ENDPROC(cpu_soft_restart)