From 9f5fee05f6897d0fe0e3a44ade71bb85cd97b2ef Mon Sep 17 00:00:00 2001
From: Marc Zyngier <maz@kernel.org>
Date: Wed, 27 Jul 2022 15:29:02 +0100
Subject: [PATCH] KVM: arm64: Move nVHE stacktrace unwinding into its own
 compilation unit

The unwinding code doesn't really belong to the exit handling code.
Instead, move it to a file (conveniently named stacktrace.c to confuse
the reviewer), and move all the stacktrace-related stuff there.

It will be joined by more code very soon.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Kalesh Singh
Tested-by: Kalesh Singh
Reviewed-by: Oliver Upton
Link: https://lore.kernel.org/r/20220727142906.1856759-3-maz@kernel.org
---
 arch/arm64/include/asm/stacktrace/nvhe.h |   2 +
 arch/arm64/kvm/Makefile                  |   2 +-
 arch/arm64/kvm/handle_exit.c             |  98 ------------------------
 arch/arm64/kvm/stacktrace.c              | 120 +++++++++++++++++++++++++++++++
 4 files changed, 123 insertions(+), 99 deletions(-)
 create mode 100644 arch/arm64/kvm/stacktrace.c

diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
index 600dbc2..8a5cb96 100644
--- a/arch/arm64/include/asm/stacktrace/nvhe.h
+++ b/arch/arm64/include/asm/stacktrace/nvhe.h
@@ -172,5 +172,7 @@ static inline int notrace unwind_next(struct unwind_state *state)
 }
 NOKPROBE_SYMBOL(unwind_next);
 
+void kvm_nvhe_dump_backtrace(unsigned long hyp_offset);
+
 #endif	/* __KVM_NVHE_HYPERVISOR__ */
 #endif	/* __ASM_STACKTRACE_NVHE_H */
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index aa127ae..5e33c2d 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -12,7 +12,7 @@ obj-$(CONFIG_KVM) += hyp/
 
 kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
	 inject_fault.o va_layout.o handle_exit.o \
-	 guest.o debug.o reset.o sys_regs.o \
+	 guest.o debug.o reset.o sys_regs.o stacktrace.o \
	 vgic-sys-reg-v3.o fpsimd.o pkvm.o \
	 arch_timer.o trng.o vmid.o \
	 vgic/vgic.o vgic/vgic-init.o \
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index c14fc4b..ef8b579 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -319,104 +319,6 @@ void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
 		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
 }
 
-/*
- * kvm_nvhe_dump_backtrace_entry - Symbolize and print an nVHE backtrace entry
- *
- * @arg    : the hypervisor offset, used for address translation
- * @where  : the program counter corresponding to the stack frame
- */
-static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where)
-{
-	unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0);
-	unsigned long hyp_offset = (unsigned long)arg;
-
-	/* Mask tags and convert to kern addr */
-	where = (where & va_mask) + hyp_offset;
-	kvm_err(" [<%016lx>] %pB\n", where, (void *)(where + kaslr_offset()));
-
-	return true;
-}
-
-static inline void kvm_nvhe_dump_backtrace_start(void)
-{
-	kvm_err("nVHE call trace:\n");
-}
-
-static inline void kvm_nvhe_dump_backtrace_end(void)
-{
-	kvm_err("---[ end nVHE call trace ]---\n");
-}
-
-/*
- * hyp_dump_backtrace - Dump the non-protected nVHE backtrace.
- *
- * @hyp_offset: hypervisor offset, used for address translation.
- *
- * The host can directly access HYP stack pages in non-protected
- * mode, so the unwinding is done directly from EL1. This removes
- * the need for shared buffers between host and hypervisor for
- * the stacktrace.
- */
-static void hyp_dump_backtrace(unsigned long hyp_offset)
-{
-	struct kvm_nvhe_stacktrace_info *stacktrace_info;
-	struct unwind_state state;
-
-	stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
-
-	kvm_nvhe_unwind_init(&state, stacktrace_info->fp, stacktrace_info->pc);
-
-	kvm_nvhe_dump_backtrace_start();
-	unwind(&state, kvm_nvhe_dump_backtrace_entry, (void *)hyp_offset);
-	kvm_nvhe_dump_backtrace_end();
-}
-
-#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
-DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)],
-			 pkvm_stacktrace);
-
-/*
- * pkvm_dump_backtrace - Dump the protected nVHE HYP backtrace.
- *
- * @hyp_offset: hypervisor offset, used for address translation.
- *
- * Dumping of the pKVM HYP backtrace is done by reading the
- * stack addresses from the shared stacktrace buffer, since the
- * host cannot directly access hypervisor memory in protected
- * mode.
- */
-static void pkvm_dump_backtrace(unsigned long hyp_offset)
-{
-	unsigned long *stacktrace
-		= (unsigned long *) this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
-	int i, size = NVHE_STACKTRACE_SIZE / sizeof(long);
-
-	kvm_nvhe_dump_backtrace_start();
-	/* The saved stacktrace is terminated by a null entry */
-	for (i = 0; i < size && stacktrace[i]; i++)
-		kvm_nvhe_dump_backtrace_entry((void *)hyp_offset, stacktrace[i]);
-	kvm_nvhe_dump_backtrace_end();
-}
-#else	/* !CONFIG_PROTECTED_NVHE_STACKTRACE */
-static void pkvm_dump_backtrace(unsigned long hyp_offset)
-{
-	kvm_err("Cannot dump pKVM nVHE stacktrace: !CONFIG_PROTECTED_NVHE_STACKTRACE\n");
-}
-#endif	/* CONFIG_PROTECTED_NVHE_STACKTRACE */
-
-/*
- * kvm_nvhe_dump_backtrace - Dump KVM nVHE hypervisor backtrace.
- *
- * @hyp_offset: hypervisor offset, used for address translation.
- */
-static void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
-{
-	if (is_protected_kvm_enabled())
-		pkvm_dump_backtrace(hyp_offset);
-	else
-		hyp_dump_backtrace(hyp_offset);
-}
-
 void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
 					      u64 elr_virt, u64 elr_phys,
 					      u64 par, uintptr_t vcpu,
diff --git a/arch/arm64/kvm/stacktrace.c b/arch/arm64/kvm/stacktrace.c
new file mode 100644
index 0000000..9812aef
--- /dev/null
+++ b/arch/arm64/kvm/stacktrace.c
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * KVM nVHE hypervisor stack tracing support.
+ *
+ * The unwinder implementation depends on the nVHE mode:
+ *
+ *   1) Non-protected nVHE mode - the host can directly access the
+ *      HYP stack pages and unwind the HYP stack in EL1. This saves having
+ *      to allocate shared buffers for the host to read the unwound
+ *      stacktrace.
+ *
+ *   2) pKVM (protected nVHE) mode - the host cannot directly access
+ *      the HYP memory. The stack is unwound in EL2 and dumped to a shared
+ *      buffer where the host can read and print the stacktrace.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+
+#include <asm/stacktrace/nvhe.h>
+
+/*
+ * kvm_nvhe_dump_backtrace_entry - Symbolize and print an nVHE backtrace entry
+ *
+ * @arg    : the hypervisor offset, used for address translation
+ * @where  : the program counter corresponding to the stack frame
+ */
+static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where)
+{
+	unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0);
+	unsigned long hyp_offset = (unsigned long)arg;
+
+	/* Mask tags and convert to kern addr */
+	where = (where & va_mask) + hyp_offset;
+	kvm_err(" [<%016lx>] %pB\n", where, (void *)(where + kaslr_offset()));
+
+	return true;
+}
+
+static void kvm_nvhe_dump_backtrace_start(void)
+{
+	kvm_err("nVHE call trace:\n");
+}
+
+static void kvm_nvhe_dump_backtrace_end(void)
+{
+	kvm_err("---[ end nVHE call trace ]---\n");
+}
+
+/*
+ * hyp_dump_backtrace - Dump the non-protected nVHE backtrace.
+ *
+ * @hyp_offset: hypervisor offset, used for address translation.
+ *
+ * The host can directly access HYP stack pages in non-protected
+ * mode, so the unwinding is done directly from EL1. This removes
+ * the need for shared buffers between host and hypervisor for
+ * the stacktrace.
+ */
+static void hyp_dump_backtrace(unsigned long hyp_offset)
+{
+	struct kvm_nvhe_stacktrace_info *stacktrace_info;
+	struct unwind_state state;
+
+	stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
+
+	kvm_nvhe_unwind_init(&state, stacktrace_info->fp, stacktrace_info->pc);
+
+	kvm_nvhe_dump_backtrace_start();
+	unwind(&state, kvm_nvhe_dump_backtrace_entry, (void *)hyp_offset);
+	kvm_nvhe_dump_backtrace_end();
+}
+
+#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
+DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)],
+			 pkvm_stacktrace);
+
+/*
+ * pkvm_dump_backtrace - Dump the protected nVHE HYP backtrace.
+ *
+ * @hyp_offset: hypervisor offset, used for address translation.
+ *
+ * Dumping of the pKVM HYP backtrace is done by reading the
+ * stack addresses from the shared stacktrace buffer, since the
+ * host cannot directly access hypervisor memory in protected
+ * mode.
+ */
+static void pkvm_dump_backtrace(unsigned long hyp_offset)
+{
+	unsigned long *stacktrace
+		= (unsigned long *) this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
+	int i, size = NVHE_STACKTRACE_SIZE / sizeof(long);
+
+	kvm_nvhe_dump_backtrace_start();
+	/* The saved stacktrace is terminated by a null entry */
+	for (i = 0; i < size && stacktrace[i]; i++)
+		kvm_nvhe_dump_backtrace_entry((void *)hyp_offset, stacktrace[i]);
+	kvm_nvhe_dump_backtrace_end();
+}
+#else	/* !CONFIG_PROTECTED_NVHE_STACKTRACE */
+static void pkvm_dump_backtrace(unsigned long hyp_offset)
+{
+	kvm_err("Cannot dump pKVM nVHE stacktrace: !CONFIG_PROTECTED_NVHE_STACKTRACE\n");
+}
+#endif	/* CONFIG_PROTECTED_NVHE_STACKTRACE */
+
+/*
+ * kvm_nvhe_dump_backtrace - Dump KVM nVHE hypervisor backtrace.
+ *
+ * @hyp_offset: hypervisor offset, used for address translation.
+ */
+void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
+{
+	if (is_protected_kvm_enabled())
+		pkvm_dump_backtrace(hyp_offset);
+	else
+		hyp_dump_backtrace(hyp_offset);
+}
-- 
2.7.4
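
For orientation: after this change, the only cross-file entry point is
kvm_nvhe_dump_backtrace(), declared in <asm/stacktrace/nvhe.h> and still
invoked from the hypervisor panic path in handle_exit.c. Below is a
condensed sketch of that call site, abridged from nvhe_hyp_panic_handler()
(the mode checks and panic diagnostics around the call are elided):

/*
 * Abridged sketch of the existing caller in arch/arm64/kvm/handle_exit.c;
 * error reporting and the final panic() message are elided.
 */
void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
					      u64 elr_virt, u64 elr_phys,
					      u64 par, uintptr_t vcpu,
					      u64 far, u64 hpfar)
{
	/* Translate the faulting EL2 PC into a kernel-image address */
	u64 elr_in_kimg = __phys_to_kimg(elr_phys);
	/* Offset mapping hyp VAs onto their kernel-image counterparts */
	u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr_virt;

	/* ... mode checks and panic diagnostics elided ... */

	/*
	 * Now implemented in stacktrace.c: dispatches to the pKVM or
	 * non-protected unwinder via is_protected_kvm_enabled().
	 */
	kvm_nvhe_dump_backtrace(hyp_offset);

	/* ... */
}

The caller never needs to know which nVHE mode is active; the
pKVM/non-protected split is resolved entirely inside the moved helper.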