// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/switch.h>
#include <hyp/sysreg-sr.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>

#include <nvhe/fixed_config.h>
#include <nvhe/mem_protect.h>

/* Non-VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);

extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);

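/*
 * Set up the EL2 trap configuration for entering the guest: trap FP/SIMD
 * and SVE accesses unless the guest already owns the FP registers, trap SME
 * if the CPU implements it, and install the hyp vectors.
 */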
static void __activate_traps(struct kvm_vcpu *vcpu)
{
        u64 val;

        ___activate_traps(vcpu);
        __activate_traps_common(vcpu);

        val = vcpu->arch.cptr_el2;
        val |= CPTR_EL2_TTA | CPTR_EL2_TAM;
        if (!guest_owns_fp_regs(vcpu)) {
                val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
                __activate_traps_fpsimd32(vcpu);
        }
        if (cpus_have_final_cap(ARM64_SME))
                val |= CPTR_EL2_TSM;

        write_sysreg(val, cptr_el2);
        write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);

        if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;

                isb();
                /*
                 * At this stage, and thanks to the above isb(), S2 is
                 * configured and enabled. We can now restore the guest's S1
                 * configuration: SCTLR, and only then TCR.
                 */
                write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
                isb();
                write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1),   SYS_TCR);
        }
}

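/*
 * Undo the guest trap configuration on the way back to the host: restore
 * the host's HCR_EL2 and CPTR_EL2 settings and the host hyp vectors.
 */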
static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
        extern char __kvm_hyp_host_vector[];
        u64 cptr;

        ___deactivate_traps(vcpu);

        if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                u64 val;

                /*
                 * Set the TCR and SCTLR registers in the exact opposite
                 * sequence as __activate_traps (first prevent walks,
                 * then force the MMU on). A generous sprinkling of isb()
                 * ensures that things happen in this exact order.
                 */
                val = read_sysreg_el1(SYS_TCR);
                write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR);
                isb();
                val = read_sysreg_el1(SYS_SCTLR);
                write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
                isb();
        }

        __deactivate_traps_common(vcpu);

        write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);

        cptr = CPTR_EL2_DEFAULT;
        if (vcpu_has_sve(vcpu) && (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
                cptr |= CPTR_EL2_TZ;
        if (cpus_have_final_cap(ARM64_SME))
                cptr &= ~CPTR_EL2_TSM;

        write_sysreg(cptr, cptr_el2);
        write_sysreg(__kvm_hyp_host_vector, vbar_el2);
}

/* Save VGICv3 state on non-VHE systems */
static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
        if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
                __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
                __vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
        }
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
        if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
                __vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
                __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
        }
}

/*
 * Disable host events, enable guest events
 */
#ifdef CONFIG_HW_PERF_EVENTS
static bool __pmu_switch_to_guest(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;

        if (pmu->events_host)
                write_sysreg(pmu->events_host, pmcntenclr_el0);

        if (pmu->events_guest)
                write_sysreg(pmu->events_guest, pmcntenset_el0);

        return (pmu->events_host || pmu->events_guest);
}

/*
 * Disable guest events, enable host events
 */
static void __pmu_switch_to_host(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;

        if (pmu->events_guest)
                write_sysreg(pmu->events_guest, pmcntenclr_el0);

        if (pmu->events_host)
                write_sysreg(pmu->events_host, pmcntenset_el0);
}
#else
#define __pmu_switch_to_guest(v)        ({ false; })
#define __pmu_switch_to_host(v)         do {} while (0)
#endif

/*
 * Handler for protected VM MSR, MRS or System instruction execution in AArch64.
 *
 * Returns true if the hypervisor has handled the exit, and control should go
 * back to the guest, or false if it hasn't.
 */
static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
{
        /*
         * Make sure we handle the exit for workarounds and ptrauth
         * before the pKVM handling, as the latter could decide to
         * UNDEF.
         */
        return (kvm_hyp_handle_sysreg(vcpu, exit_code) ||
                kvm_handle_pvm_sysreg(vcpu, exit_code));
}

static const exit_handler_fn hyp_exit_handlers[] = {
        [0 ... ESR_ELx_EC_MAX]          = NULL,
        [ESR_ELx_EC_CP15_32]            = kvm_hyp_handle_cp15_32,
        [ESR_ELx_EC_SYS64]              = kvm_hyp_handle_sysreg,
        [ESR_ELx_EC_SVE]                = kvm_hyp_handle_fpsimd,
        [ESR_ELx_EC_FP_ASIMD]           = kvm_hyp_handle_fpsimd,
        [ESR_ELx_EC_IABT_LOW]           = kvm_hyp_handle_iabt_low,
        [ESR_ELx_EC_DABT_LOW]           = kvm_hyp_handle_dabt_low,
        [ESR_ELx_EC_PAC]                = kvm_hyp_handle_ptrauth,
};

static const exit_handler_fn pvm_exit_handlers[] = {
        [0 ... ESR_ELx_EC_MAX]          = NULL,
        [ESR_ELx_EC_SYS64]              = kvm_handle_pvm_sys64,
        [ESR_ELx_EC_SVE]                = kvm_handle_pvm_restricted,
        [ESR_ELx_EC_FP_ASIMD]           = kvm_hyp_handle_fpsimd,
        [ESR_ELx_EC_IABT_LOW]           = kvm_hyp_handle_iabt_low,
        [ESR_ELx_EC_DABT_LOW]           = kvm_hyp_handle_dabt_low,
        [ESR_ELx_EC_PAC]                = kvm_hyp_handle_ptrauth,
};

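/* Protected VMs get the restricted handler table, everything else the default. */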
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
{
        if (unlikely(kvm_vm_is_protected(kern_hyp_va(vcpu->kvm))))
                return pvm_exit_handlers;

        return hyp_exit_handlers;
}

/*
 * Some guests (e.g., protected VMs) are not allowed to run in AArch32.
 * The ARMv8 architecture does not give the hypervisor a mechanism to prevent a
 * guest from dropping to AArch32 EL0 if implemented by the CPU. If the
 * hypervisor spots a guest in such a state, ensure it is handled, and don't
 * trust the host to spot or fix it.  The check below is based on the one in
 * kvm_arch_vcpu_ioctl_run().
 *
 * If the guest ran in AArch32 when it shouldn't have, the exit code is
 * rewritten to ARM_EXCEPTION_IL so that the run loop exits to the host.
 */
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
{
        struct kvm *kvm = kern_hyp_va(vcpu->kvm);

        if (kvm_vm_is_protected(kvm) && vcpu_mode_is_32bit(vcpu)) {
                /*
                 * As we have caught the guest red-handed, decide that it isn't
                 * fit for purpose anymore by making the vcpu invalid. The VMM
                 * can try and fix it by re-initializing the vcpu with
                 * KVM_ARM_VCPU_INIT, however, this is likely not possible for
                 * protected VMs.
                 */
                vcpu->arch.target = -1;
                *exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
                *exit_code |= ARM_EXCEPTION_IL;
        }
}

/* Switch to the guest for legacy non-VHE systems */
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
        struct kvm_cpu_context *host_ctxt;
        struct kvm_cpu_context *guest_ctxt;
        struct kvm_s2_mmu *mmu;
        bool pmu_switch_needed;
        u64 exit_code;

        /*
         * Having IRQs masked via PMR when entering the guest means the GIC
         * will not signal the CPU of interrupts of lower priority, and the
         * only way to get out will be via guest exceptions.
         * Naturally, we want to avoid this.
         */
        if (system_uses_irq_prio_masking()) {
                gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
                pmr_sync();
        }

        host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        host_ctxt->__hyp_running_vcpu = vcpu;
        guest_ctxt = &vcpu->arch.ctxt;

        pmu_switch_needed = __pmu_switch_to_guest(vcpu);

        __sysreg_save_state_nvhe(host_ctxt);
        /*
         * We must flush and disable the SPE buffer for nVHE, as
         * the translation regime (EL1&0) is going to be loaded with
         * that of the guest. And we must do this before we change the
         * translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
         * before we load guest Stage1.
         */
        __debug_save_host_buffers_nvhe(vcpu);

        /*
         * We're about to restore some new MMU state. Make sure
         * ongoing page-table walks that have started before we
         * trapped to EL2 have completed. This also synchronises the
         * above disabling of SPE and TRBE.
         *
         * See DDI0487I.a D8.1.5 "Out-of-context translation regimes",
         * rule R_LFHQG and subsequent information statements.
         */
        dsb(nsh);

        __kvm_adjust_pc(vcpu);

        /*
         * We must restore the 32-bit state before the sysregs, thanks
         * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
         *
         * Also, and in order to be able to deal with erratum #1319537 (A57)
         * and #1319367 (A72), we must ensure that all VM-related sysreg are
         * restored before we enable S2 translation.
         */
        __sysreg32_restore_state(vcpu);
        __sysreg_restore_state_nvhe(guest_ctxt);

        mmu = kern_hyp_va(vcpu->arch.hw_mmu);
        __load_stage2(mmu, kern_hyp_va(mmu->arch));
        __activate_traps(vcpu);

        __hyp_vgic_restore_state(vcpu);
        __timer_enable_traps(vcpu);

        __debug_switch_to_guest(vcpu);

        do {
                /* Jump in the fire! */
                exit_code = __guest_enter(vcpu);

                /* And we're baaack! */
        } while (fixup_guest_exit(vcpu, &exit_code));

        __sysreg_save_state_nvhe(guest_ctxt);
        __sysreg32_save_state(vcpu);
        __timer_disable_traps(vcpu);
        __hyp_vgic_save_state(vcpu);

        /*
         * Same thing as before the guest run: we're about to switch
         * the MMU context, so let's make sure we don't have any
         * ongoing EL1&0 translations.
         */
        dsb(nsh);

        __deactivate_traps(vcpu);
        __load_host_stage2();

        __sysreg_restore_state_nvhe(host_ctxt);

        if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
                __fpsimd_save_fpexc32(vcpu);

        __debug_switch_to_host(vcpu);
        /*
         * This must come after restoring the host sysregs, since a non-VHE
         * system may enable SPE here and make use of the TTBRs.
         */
        __debug_restore_host_buffers_nvhe(vcpu);

        if (pmu_switch_needed)
                __pmu_switch_to_host(vcpu);

        /* Returning to host will clear PSR.I, remask PMR if needed */
        if (system_uses_irq_prio_masking())
                gic_write_pmr(GIC_PRIO_IRQOFF);

        host_ctxt->__hyp_running_vcpu = NULL;

        return exit_code;
}

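/*
 * Tear down whatever guest context is still loaded, prepare the hyp
 * stacktrace, then hand over to the host to do the actual panic.
 */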
asmlinkage void __noreturn hyp_panic(void)
{
        u64 spsr = read_sysreg_el2(SYS_SPSR);
        u64 elr = read_sysreg_el2(SYS_ELR);
        u64 par = read_sysreg_par();
        struct kvm_cpu_context *host_ctxt;
        struct kvm_vcpu *vcpu;

        host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        vcpu = host_ctxt->__hyp_running_vcpu;

        if (vcpu) {
                __timer_disable_traps(vcpu);
                __deactivate_traps(vcpu);
                __load_host_stage2();
                __sysreg_restore_state_nvhe(host_ctxt);
        }

        /* Prepare to dump kvm nvhe hyp stacktrace */
        kvm_nvhe_prepare_backtrace((unsigned long)__builtin_frame_address(0),
                                   _THIS_IP_);

        __hyp_do_panic(host_ctxt, spsr, elr, par);
        unreachable();
}

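/* Panic entry point used when the hyp stack can no longer be trusted. */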
asmlinkage void __noreturn hyp_panic_bad_stack(void)
{
        hyp_panic();
}

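/* Catch-all for exceptions taken at EL2 that we did not expect to handle. */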
asmlinkage void kvm_unexpected_el2_exception(void)
{
        __kvm_unexpected_el2_exception();
}