| Message ID | 20220721055728.718573-11-kaleshsingh@google.com (mailing list archive) |
|---|---|
| State | New, archived |
| Series | KVM nVHE Hypervisor stack unwinder |
Hi Kalesh,

On Thu, Jul 21, 2022 at 6:57 AM Kalesh Singh <kaleshsingh@google.com> wrote:
>
> Add some stub implementations of protected nVHE stack unwinder, for
> building. These are implemented later in this series.
>
> Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
> ---

Reviewed-by: Fuad Tabba <tabba@google.com>

Cheers,
/fuad
Add some stub implementations of protected nVHE stack unwinder, for
building. These are implemented later in this series.

Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
---
Changes in v5:
 - Mark unwind_next() as inline, per Marc

 arch/arm64/include/asm/stacktrace/nvhe.h | 59 ++++++++++++++++++++++++
 arch/arm64/kvm/hyp/nvhe/stacktrace.c     |  3 +-
 2 files changed, 60 insertions(+), 2 deletions(-)
 create mode 100644 arch/arm64/include/asm/stacktrace/nvhe.h

diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
new file mode 100644
index 000000000000..80d71932afff
--- /dev/null
+++ b/arch/arm64/include/asm/stacktrace/nvhe.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * KVM nVHE hypervisor stack tracing support.
+ *
+ * The unwinder implementation depends on the nVHE mode:
+ *
+ *   1) pKVM (protected nVHE) mode - the host cannot directly access
+ *      the HYP memory. The stack is unwinded in EL2 and dumped to a shared
+ *      buffer where the host can read and print the stacktrace.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+#ifndef __ASM_STACKTRACE_NVHE_H
+#define __ASM_STACKTRACE_NVHE_H
+
+#include <asm/stacktrace/common.h>
+
+static inline bool on_accessible_stack(const struct task_struct *tsk,
+				       unsigned long sp, unsigned long size,
+				       struct stack_info *info)
+{
+	return false;
+}
+
+#ifdef __KVM_NVHE_HYPERVISOR__
+/*
+ * Protected nVHE HYP stack unwinder
+ *
+ * In protected mode, the unwinding is done by the hypervisor in EL2.
+ */
+
+#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
+static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
+				     struct stack_info *info)
+{
+	return false;
+}
+
+static inline int notrace unwind_next(struct unwind_state *state)
+{
+	return 0;
+}
+NOKPROBE_SYMBOL(unwind_next);
+#else	/* !CONFIG_PROTECTED_NVHE_STACKTRACE */
+static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
+				     struct stack_info *info)
+{
+	return false;
+}
+
+static inline int notrace unwind_next(struct unwind_state *state)
+{
+	return 0;
+}
+NOKPROBE_SYMBOL(unwind_next);
+#endif	/* CONFIG_PROTECTED_NVHE_STACKTRACE */
+
+#endif	/* __KVM_NVHE_HYPERVISOR__ */
+#endif	/* __ASM_STACKTRACE_NVHE_H */
diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
index 69e65b457f1c..96c8b93320eb 100644
--- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c
+++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
@@ -4,8 +4,7 @@
  *
  * Copyright (C) 2022 Google LLC
  */
-#include <asm/memory.h>
-#include <asm/percpu.h>
+#include <asm/stacktrace/nvhe.h>
 
 DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
 	__aligned(16);
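For readers unfamiliar with the pKVM scheme the header comment describes, here is a minimal userspace sketch of the idea: the EL2 side records frame PCs into a fixed-size buffer it shares with the host, and the host can only read and print them. Every name in it (`hyp_record_backtrace`, `host_print_backtrace`, `NVHE_STACKTRACE_ENTRIES`, the zero terminator) is a hypothetical stand-in for illustration, not the API this series adds.

```c
/*
 * Minimal userspace sketch (NOT kernel code) of the shared-buffer
 * stacktrace scheme: one writer ("EL2"), one reader ("host").
 * All identifiers here are hypothetical.
 */
#include <stdio.h>
#include <stddef.h>

#define NVHE_STACKTRACE_ENTRIES 62	/* hypothetical capacity */

/* Stands in for the buffer shared between the hypervisor and the host. */
static unsigned long shared_backtrace[NVHE_STACKTRACE_ENTRIES + 1];

/* "EL2" writer: dump PCs until the buffer is full; 0 terminates the trace. */
static void hyp_record_backtrace(const unsigned long *pcs, size_t nr)
{
	size_t i;

	for (i = 0; i < nr && i < NVHE_STACKTRACE_ENTRIES; i++)
		shared_backtrace[i] = pcs[i];
	shared_backtrace[i] = 0;	/* terminator the reader stops on */
}

/* "Host" reader: print whatever the hypervisor dumped. */
static void host_print_backtrace(void)
{
	size_t i;

	printf("nVHE call trace:\n");
	for (i = 0; shared_backtrace[i]; i++)
		printf(" [<%016lx>]\n", shared_backtrace[i]);
}

int main(void)
{
	/* Fake PCs standing in for unwound hypervisor frames. */
	unsigned long pcs[] = { 0xffff800008001234UL, 0xffff800008005678UL };

	hyp_record_backtrace(pcs, 2);
	host_print_backtrace();
	return 0;
}
```

The sketch only captures the one-way write/read split that motivates unwinding in EL2; in the actual series the buffer presumably lives in hypervisor memory made readable to the host, which is what lets the host print a trace of memory it cannot otherwise access.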