1 // SPDX-License-Identifier: GPL-2.0-only
3 * KVM nVHE hypervisor stack tracing support.
5 * Copyright (C) 2022 Google LLC
7 #include <asm/kvm_asm.h>
8 #include <asm/kvm_hyp.h>
9 #include <asm/memory.h>
10 #include <asm/percpu.h>
12 DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
15 DEFINE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
18 * hyp_prepare_backtrace - Prepare non-protected nVHE backtrace.
20 * @fp : frame pointer at which to start the unwinding.
21 * @pc : program counter at which to start the unwinding.
23 * Save the information needed by the host to unwind the non-protected
24 * nVHE hypervisor stack in EL1.
26 static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
28 struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr(&kvm_stacktrace_info);
29 struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
31 stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - PAGE_SIZE);
32 stacktrace_info->overflow_stack_base = (unsigned long)this_cpu_ptr(overflow_stack);
33 stacktrace_info->fp = fp;
34 stacktrace_info->pc = pc;
#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
#include <asm/stacktrace/nvhe.h>
/*
 * Per-CPU buffer shared with the host: the hypervisor records the pKVM
 * backtrace entries here and the host reads them from EL1 to dump them.
 */
DEFINE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace);
42 static struct stack_info stackinfo_get_overflow(void)
44 unsigned long low = (unsigned long)this_cpu_ptr(overflow_stack);
45 unsigned long high = low + OVERFLOW_STACK_SIZE;
47 return (struct stack_info) {
53 static struct stack_info stackinfo_get_hyp(void)
55 struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
56 unsigned long high = params->stack_hyp_va;
57 unsigned long low = high - PAGE_SIZE;
59 return (struct stack_info) {
/* Advance @state to the previous frame record; <0 on failure. */
static int unwind_next(struct unwind_state *state)
{
	return unwind_next_frame_record(state);
}
70 static void notrace unwind(struct unwind_state *state,
71 stack_trace_consume_fn consume_entry,
77 if (!consume_entry(cookie, state->pc))
79 ret = unwind_next(state);
86 * pkvm_save_backtrace_entry - Saves a protected nVHE HYP stacktrace entry
88 * @arg : index of the entry in the stacktrace buffer
89 * @where : the program counter corresponding to the stack frame
91 * Save the return address of a stack frame to the shared stacktrace buffer.
92 * The host can access this shared buffer from EL1 to dump the backtrace.
94 static bool pkvm_save_backtrace_entry(void *arg, unsigned long where)
96 unsigned long *stacktrace = this_cpu_ptr(pkvm_stacktrace);
97 int *idx = (int *)arg;
100 * Need 2 free slots: 1 for current entry and 1 for the
103 if (*idx > ARRAY_SIZE(pkvm_stacktrace) - 2)
106 stacktrace[*idx] = where;
107 stacktrace[++*idx] = 0UL;
113 * pkvm_save_backtrace - Saves the protected nVHE HYP stacktrace
115 * @fp : frame pointer at which to start the unwinding.
116 * @pc : program counter at which to start the unwinding.
118 * Save the unwinded stack addresses to the shared stacktrace buffer.
119 * The host can access this shared buffer from EL1 to dump the backtrace.
121 static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
123 struct stack_info stacks[] = {
124 stackinfo_get_overflow(),
127 struct unwind_state state = {
129 .nr_stacks = ARRAY_SIZE(stacks),
133 kvm_nvhe_unwind_init(&state, fp, pc);
135 unwind(&state, pkvm_save_backtrace_entry, &idx);
137 #else /* !CONFIG_PROTECTED_NVHE_STACKTRACE */
138 static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
141 #endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */
/*
 * kvm_nvhe_prepare_backtrace - prepare to dump the nVHE backtrace
 *
 * @fp : frame pointer at which to start the unwinding.
 * @pc : program counter at which to start the unwinding.
 *
 * Saves the information needed by the host to dump the nVHE hypervisor
 * backtrace.
 */
void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc)
{
	/*
	 * The two paths are mutually exclusive: pKVM saves the full trace
	 * itself, the non-protected path only publishes unwind info.
	 */
	if (is_protected_kvm_enabled())
		pkvm_save_backtrace(fp, pc);
	else
		hyp_prepare_backtrace(fp, pc);
}