KVM: arm64: Make unwind()/on_accessible_stack() per-unwinder functions
arch/arm64/kvm/hyp/nvhe/stacktrace.c (platform/kernel/linux-starfive.git)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM nVHE hypervisor stack tracing support.
 *
 * Copyright (C) 2022 Google LLC
 */
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/memory.h>
#include <asm/percpu.h>

DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
        __aligned(16);

DEFINE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);

/*
 * hyp_prepare_backtrace - Prepare non-protected nVHE backtrace.
 *
 * @fp : frame pointer at which to start the unwinding.
 * @pc : program counter at which to start the unwinding.
 *
 * Save the information needed by the host to unwind the non-protected
 * nVHE hypervisor stack in EL1.
 */
static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
{
        struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr(&kvm_stacktrace_info);
        struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);

        stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - PAGE_SIZE);
        stacktrace_info->overflow_stack_base = (unsigned long)this_cpu_ptr(overflow_stack);
        stacktrace_info->fp = fp;
        stacktrace_info->pc = pc;
}
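/*
 * Illustrative sketch (not part of this file): in non-protected mode the
 * host unwinder is expected to read the per-CPU kvm_stacktrace_info
 * published above and start its walk from the saved fp/pc. The accessor
 * and the helper named below are assumptions for illustration only:
 *
 *      struct kvm_nvhe_stacktrace_info *info =
 *              this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
 *
 *      host_dump_hyp_backtrace(info->fp, info->pc);   (hypothetical helper)
 */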

#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
#include <asm/stacktrace/nvhe.h>

DEFINE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace);

static bool on_overflow_stack(unsigned long sp, unsigned long size,
                              struct stack_info *info)
{
        unsigned long low = (unsigned long)this_cpu_ptr(overflow_stack);
        unsigned long high = low + OVERFLOW_STACK_SIZE;

        return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
}

static bool on_hyp_stack(unsigned long sp, unsigned long size,
                         struct stack_info *info)
{
        struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
        unsigned long high = params->stack_hyp_va;
        unsigned long low = high - PAGE_SIZE;

        return on_stack(sp, size, low, high, STACK_TYPE_HYP, info);
}

static bool on_accessible_stack(const struct task_struct *tsk,
                                unsigned long sp, unsigned long size,
                                struct stack_info *info)
{
        if (info)
                info->type = STACK_TYPE_UNKNOWN;

        return (on_overflow_stack(sp, size, info) ||
                on_hyp_stack(sp, size, info));
}
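/*
 * For reference, a rough sketch of the range check that on_stack() performs
 * (based on the arm64 common unwind helpers; approximate, not verbatim):
 *
 *      if (!low)
 *              return false;
 *      if (sp < low || sp + size < sp || sp + size > high)
 *              return false;
 *      if (info) {
 *              info->low = low;
 *              info->high = high;
 *              info->type = type;
 *      }
 *      return true;
 */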

/* Advance the unwind state to the previous frame record, if accessible. */
static int unwind_next(struct unwind_state *state)
{
        struct stack_info info;

        return unwind_next_common(state, &info, on_accessible_stack, NULL);
}

/*
 * Walk the stack, feeding each PC to @consume_entry until it returns
 * false or the next frame record cannot be unwound.
 */
static void notrace unwind(struct unwind_state *state,
                           stack_trace_consume_fn consume_entry,
                           void *cookie)
{
        while (1) {
                int ret;

                if (!consume_entry(cookie, state->pc))
                        break;
                ret = unwind_next(state);
                if (ret < 0)
                        break;
        }
}

/*
 * pkvm_save_backtrace_entry - Saves a protected nVHE HYP stacktrace entry
 *
 * @arg    : index of the entry in the stacktrace buffer
 * @where  : the program counter corresponding to the stack frame
 *
 * Save the return address of a stack frame to the shared stacktrace buffer.
 * The host can access this shared buffer from EL1 to dump the backtrace.
 */
static bool pkvm_save_backtrace_entry(void *arg, unsigned long where)
{
        unsigned long *stacktrace = this_cpu_ptr(pkvm_stacktrace);
        int size = NVHE_STACKTRACE_SIZE / sizeof(long);
        int *idx = (int *)arg;

        /*
         * Need 2 free slots: 1 for current entry and 1 for the
         * delimiter.
         */
        if (*idx > size - 2)
                return false;

        stacktrace[*idx] = where;
        stacktrace[++*idx] = 0UL;

        return true;
}
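/*
 * Illustrative sketch (not part of this file): the host side can walk the
 * shared per-CPU buffer until it hits the 0UL delimiter written above. The
 * accessor and the print statement are assumptions for illustration only:
 *
 *      unsigned long *trace = this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
 *      unsigned long nr_entries = NVHE_STACKTRACE_SIZE / sizeof(long);
 *      unsigned long i;
 *
 *      for (i = 0; i < nr_entries && trace[i]; i++)
 *              kvm_err("   0x%016lx\n", trace[i]);
 */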

/*
 * pkvm_save_backtrace - Saves the protected nVHE HYP stacktrace
 *
 * @fp : frame pointer at which to start the unwinding.
 * @pc : program counter at which to start the unwinding.
 *
 * Save the unwound stack addresses to the shared stacktrace buffer.
 * The host can access this shared buffer from EL1 to dump the backtrace.
 */
static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
{
        struct unwind_state state;
        int idx = 0;

        kvm_nvhe_unwind_init(&state, fp, pc);

        unwind(&state, pkvm_save_backtrace_entry, &idx);
}
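/*
 * For reference, kvm_nvhe_unwind_init() (from asm/stacktrace/nvhe.h) simply
 * seeds the unwind state with the supplied frame pointer and program
 * counter, roughly (approximate, not verbatim):
 *
 *      unwind_init_common(state, NULL);
 *      state->fp = fp;
 *      state->pc = pc;
 */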
#else /* !CONFIG_PROTECTED_NVHE_STACKTRACE */
static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
{
}
#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */

/*
 * kvm_nvhe_prepare_backtrace - prepare to dump the nVHE backtrace
 *
 * @fp : frame pointer at which to start the unwinding.
 * @pc : program counter at which to start the unwinding.
 *
 * Saves the information needed by the host to dump the nVHE hypervisor
 * backtrace.
 */
void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc)
{
        if (is_protected_kvm_enabled())
                pkvm_save_backtrace(fp, pc);
        else
                hyp_prepare_backtrace(fp, pc);
}
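/*
 * Usage sketch (illustrative; call site paraphrased, not verbatim): the
 * hypervisor panic path is expected to call this with the frame pointer
 * and program counter of the panicking context, e.g.
 *
 *      kvm_nvhe_prepare_backtrace((unsigned long)__builtin_frame_address(0),
 *                                 _THIS_IP_);
 *
 * The host then dumps either kvm_stacktrace_info (non-protected mode) or
 * the pkvm_stacktrace buffer (protected mode) from EL1.
 */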