
[v5,16/17] KVM: arm64: Introduce pkvm_dump_backtrace()

Message ID 20220721055728.718573-17-kaleshsingh@google.com (mailing list archive)
State New, archived
Series KVM nVHE Hypervisor stack unwinder

Commit Message

Kalesh Singh July 21, 2022, 5:57 a.m. UTC
Dumps the pKVM hypervisor backtrace from EL1 by reading the unwound
addresses from the shared stacktrace buffer.

The nVHE hyp backtrace is dumped on hyp_panic(), before panicking the
host.

Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
---

Changes in v5:
  - Move code out from nvhe.h header to handle_exit.c, per Marc
  - Fix stacktrace symbolization when CONFIG_RANDOMIZE_BASE is enabled,
    per Fuad
  - Use regular comments instead of doc comments, per Fuad

 arch/arm64/kvm/handle_exit.c | 54 ++++++++++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)

Comments

Fuad Tabba July 21, 2022, 9:59 a.m. UTC | #1
Hi Kalesh,

On Thu, Jul 21, 2022 at 6:58 AM Kalesh Singh <kaleshsingh@google.com> wrote:
>
> Dumps the pKVM hypervisor backtrace from EL1 by reading the unwound
> addresses from the shared stacktrace buffer.
>
> The nVHE hyp backtrace is dumped on hyp_panic(), before panicking the
> host.
>
> Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
> ---
>
> Changes in v5:
>   - Move code out from nvhe.h header to handle_exit.c, per Marc
>   - Fix stacktrace symbolization when CONFIG_RANDOMIZE_BASE is enabled,
>     per Fuad
>   - Use regular comments instead of doc comments, per Fuad
>
>  arch/arm64/kvm/handle_exit.c | 54 ++++++++++++++++++++++++++++++++++++
>  1 file changed, 54 insertions(+)
>
> diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
> index f66c0142b335..ad568da5c7d7 100644
> --- a/arch/arm64/kvm/handle_exit.c
> +++ b/arch/arm64/kvm/handle_exit.c
> @@ -318,6 +318,57 @@ void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
>                 kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
>  }
>
> +#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
> +DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)],
> +                        pkvm_stacktrace);
> +
> +/*
> + * pkvm_dump_backtrace - Dump the protected nVHE HYP backtrace.
> + *
> + * @hyp_offset: hypervisor offset, used for address translation.
> + *
> + * Dumping of the pKVM HYP backtrace is done by reading the
> + * stack addresses from the shared stacktrace buffer, since the
> + * host cannot direclty access hyperviosr memory in protected

s/direclty/directly/ and s/hyperviosr/hypervisor/

Reviewed-by: Fuad Tabba <tabba@google.com>

Cheers,
/fuad


> + * mode.
> + */
> +static void pkvm_dump_backtrace(unsigned long hyp_offset)
> +{
> +       unsigned long *stacktrace_entry
> +               = (unsigned long *)this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
> +       unsigned long va_mask, pc;
> +
> +       va_mask = GENMASK_ULL(vabits_actual - 1, 0);
> +
> +       kvm_err("Protected nVHE HYP call trace:\n");
> +
> +       /* The stack trace is terminated by a null entry */
> +       for (; *stacktrace_entry; stacktrace_entry++) {
> +               /* Mask tags and convert to kern addr */
> +               pc = (*stacktrace_entry & va_mask) + hyp_offset;
> +               kvm_err(" [<%016lx>] %pB\n", pc, (void *)(pc + kaslr_offset()));
> +       }
> +
> +       kvm_err("---- End of Protected nVHE HYP call trace ----\n");
> +}
> +#else  /* !CONFIG_PROTECTED_NVHE_STACKTRACE */
> +static void pkvm_dump_backtrace(unsigned long hyp_offset)
> +{
> +       kvm_err("Cannot dump pKVM nVHE stacktrace: !CONFIG_PROTECTED_NVHE_STACKTRACE\n");
> +}
> +#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */
> +
> +/*
> + * kvm_nvhe_dump_backtrace - Dump KVM nVHE hypervisor backtrace.
> + *
> + * @hyp_offset: hypervisor offset, used for address translation.
> + */
> +static void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
> +{
> +       if (is_protected_kvm_enabled())
> +               pkvm_dump_backtrace(hyp_offset);
> +}
> +
>  void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
>                                               u64 elr_virt, u64 elr_phys,
>                                               u64 par, uintptr_t vcpu,
> @@ -353,6 +404,9 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
>                                 (void *)panic_addr);
>         }
>
> +       /* Dump the nVHE hypervisor backtrace */
> +       kvm_nvhe_dump_backtrace(hyp_offset);
> +
>         /*
>          * Hyp has panicked and we're going to handle that by panicking the
>          * kernel. The kernel offset will be revealed in the panic so we're
> --
> 2.37.0.170.g444d1eabd0-goog
>
Oliver Upton July 22, 2022, 11:16 a.m. UTC | #2
Hi Kalesh,

On Wed, Jul 20, 2022 at 10:57:27PM -0700, Kalesh Singh wrote:

[...]

> +/*
> + * pkvm_dump_backtrace - Dump the protected nVHE HYP backtrace.
> + *
> + * @hyp_offset: hypervisor offset, used for address translation.
> + *
> + * Dumping of the pKVM HYP backtrace is done by reading the
> + * stack addresses from the shared stacktrace buffer, since the
> + * host cannot direclty access hyperviosr memory in protected
> + * mode.
> + */
> +static void pkvm_dump_backtrace(unsigned long hyp_offset)
> +{
> +	unsigned long *stacktrace_entry
> +		= (unsigned long *)this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
> +	unsigned long va_mask, pc;
> +
> +	va_mask = GENMASK_ULL(vabits_actual - 1, 0);
> +
> +	kvm_err("Protected nVHE HYP call trace:\n");

This and the footer printks should be put in respective helpers to share
between pKVM and non-pKVM backtrace implementations. I imagine users
will invariably bake some pattern matching to scrape traces, and it
should be consistent between both flavors.
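
Something like the below, perhaps (an untested sketch; the helper names
and strings here are made up, not taken from the series):

static void kvm_nvhe_dump_backtrace_start(void)
{
	kvm_err("nVHE call trace:\n");
}

static void kvm_nvhe_dump_backtrace_end(void)
{
	kvm_err("---[ end nVHE call trace ]---\n");
}

With pkvm_dump_backtrace() and the non-protected unwinder both calling
these, the header/footer strings can never drift apart.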

> +	/* The stack trace is terminated by a null entry */
> +	for (; *stacktrace_entry; stacktrace_entry++) {

At the point we're dumping the backtrace we know that EL2 has already
soiled itself, so we shouldn't explicitly depend on it providing NULL
terminators. I believe this loop should have an explicit range && NULL
check.
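
For example (untested, and reusing the existing pc/va_mask locals from
pkvm_dump_backtrace(), just to illustrate the bound):

	unsigned long *start, *end, *entry;

	start = (unsigned long *)this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
	end = start + NVHE_STACKTRACE_SIZE / sizeof(long);

	/* Stop at a NULL entry, but never walk past the shared buffer */
	for (entry = start; entry < end && *entry; entry++) {
		pc = (*entry & va_mask) + hyp_offset;
		kvm_err(" [<%016lx>] %pB\n", pc, (void *)(pc + kaslr_offset()));
	}

That way a corrupt, unterminated buffer can't send the host walking off
the end of the per-CPU region.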

--
Thanks,
Oliver
Kalesh Singh July 22, 2022, 5:25 p.m. UTC | #3
On Fri, Jul 22, 2022 at 4:16 AM Oliver Upton <oliver.upton@linux.dev> wrote:
>
> Hi Kalesh,
>
> On Wed, Jul 20, 2022 at 10:57:27PM -0700, Kalesh Singh wrote:
>
> [...]
>
> > +/*
> > + * pkvm_dump_backtrace - Dump the protected nVHE HYP backtrace.
> > + *
> > + * @hyp_offset: hypervisor offset, used for address translation.
> > + *
> > + * Dumping of the pKVM HYP backtrace is done by reading the
> > + * stack addresses from the shared stacktrace buffer, since the
> > + * host cannot direclty access hyperviosr memory in protected
> > + * mode.
> > + */
> > +static void pkvm_dump_backtrace(unsigned long hyp_offset)
> > +{
> > +     unsigned long *stacktrace_entry
> > +             = (unsigned long *)this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
> > +     unsigned long va_mask, pc;
> > +
> > +     va_mask = GENMASK_ULL(vabits_actual - 1, 0);
> > +
> > +     kvm_err("Protected nVHE HYP call trace:\n");
>
> This and the footer printks should be put in respective helpers to share
> between pKVM and non-pKVM backtrace implementations. I imagine users
> will invariably bake some pattern matching to scrape traces, and it
> should be consistent between both flavors.

Hi Oliver,

Ok will split these out into helpers.

>
> > +     /* The stack trace is terminated by a null entry */
> > +     for (; *stacktrace_entry; stacktrace_entry++) {
>
> At the point we're dumping the backtrace we know that EL2 has already
> soiled itself, so we shouldn't explicitly depend on it providing NULL
> terminators. I believe this loop should have an explicit range && NULL
> check.

Good point. I'll add the additional checks in the next version.

Thanks,
Kalesh
>
> --
> Thanks,
> Oliver

Patch

diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index f66c0142b335..ad568da5c7d7 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -318,6 +318,57 @@ void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
 		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
 }
 
+#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
+DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)],
+			 pkvm_stacktrace);
+
+/*
+ * pkvm_dump_backtrace - Dump the protected nVHE HYP backtrace.
+ *
+ * @hyp_offset: hypervisor offset, used for address translation.
+ *
+ * Dumping of the pKVM HYP backtrace is done by reading the
+ * stack addresses from the shared stacktrace buffer, since the
+ * host cannot direclty access hyperviosr memory in protected
+ * mode.
+ */
+static void pkvm_dump_backtrace(unsigned long hyp_offset)
+{
+	unsigned long *stacktrace_entry
+		= (unsigned long *)this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
+	unsigned long va_mask, pc;
+
+	va_mask = GENMASK_ULL(vabits_actual - 1, 0);
+
+	kvm_err("Protected nVHE HYP call trace:\n");
+
+	/* The stack trace is terminated by a null entry */
+	for (; *stacktrace_entry; stacktrace_entry++) {
+		/* Mask tags and convert to kern addr */
+		pc = (*stacktrace_entry & va_mask) + hyp_offset;
+		kvm_err(" [<%016lx>] %pB\n", pc, (void *)(pc + kaslr_offset()));
+	}
+
+	kvm_err("---- End of Protected nVHE HYP call trace ----\n");
+}
+#else	/* !CONFIG_PROTECTED_NVHE_STACKTRACE */
+static void pkvm_dump_backtrace(unsigned long hyp_offset)
+{
+	kvm_err("Cannot dump pKVM nVHE stacktrace: !CONFIG_PROTECTED_NVHE_STACKTRACE\n");
+}
+#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */
+
+/*
+ * kvm_nvhe_dump_backtrace - Dump KVM nVHE hypervisor backtrace.
+ *
+ * @hyp_offset: hypervisor offset, used for address translation.
+ */
+static void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
+{
+	if (is_protected_kvm_enabled())
+		pkvm_dump_backtrace(hyp_offset);
+}
+
 void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
 					      u64 elr_virt, u64 elr_phys,
 					      u64 par, uintptr_t vcpu,
@@ -353,6 +404,9 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
 				(void *)panic_addr);
 	}
 
+	/* Dump the nVHE hypervisor backtrace */
+	kvm_nvhe_dump_backtrace(hyp_offset);
+
 	/*
 	 * Hyp has panicked and we're going to handle that by panicking the
 	 * kernel. The kernel offset will be revealed in the panic so we're