/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 - Google Inc
 * Author: Andrew Scull <ascull@google.com>
 */

#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

	.text

SYM_FUNC_START(__host_exit)
	get_host_ctxt	x0, x1
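
	/*
	 * On entry, the vector stub has already pushed the host's x0 and x1
	 * to the stack; x2 and x3 are saved first so they can be reused as
	 * scratch registers while x0 holds the host context pointer.
	 */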
	/* Store the host regs x2 and x3 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(2)]

	/* Retrieve the host regs x0-x1 from the stack */
	ldp	x2, x3, [sp], #16	// x0, x1

	/* Store the host regs x0-x1 and x4-x17 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x0, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x0, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x0, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x0, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x0, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x0, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x0, #CPU_XREG_OFFSET(16)]

	/* Store the host regs x18-x29, lr */
	save_callee_saved_regs x0

	/* Save the host context pointer in x29 across the function call */
	mov	x29, x0
	bl	handle_trap
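
	/*
	 * handle_trap() is the C exit handler in hyp/nvhe/hyp-main.c; it
	 * dispatches the host's HVCs and SMCs before returning here.
	 */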

	/* Restore host regs x0-x17 */
__host_enter_restore_full:
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]

	/* x0-7 are used for panic arguments */
__host_enter_for_panic:
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	/* Restore host regs x18-x29, lr */
	restore_callee_saved_regs x29

	/* Do not touch any register after this! */
__host_enter_without_restoring:
	eret
	sb
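
	/*
	 * Note: the speculation barrier after ERET guards against
	 * straight-line speculation past the exception return.
	 */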
SYM_FUNC_END(__host_exit)

/*
 * void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
 */
SYM_FUNC_START(__host_enter)
	mov	x29, x0
	b	__host_enter_restore_full
SYM_FUNC_END(__host_enter)

/*
 * void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
 *				   u64 elr, u64 par);
 */
SYM_FUNC_START(__hyp_do_panic)
	/* Prepare and exit to the host's panic function. */
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =nvhe_hyp_panic_handler
	hyp_kimg_va lr, x6
	msr	elr_el2, lr

	mov	x29, x0

#ifdef CONFIG_NVHE_EL2_DEBUG
	/* Ensure host stage-2 is disabled */
	mrs	x0, hcr_el2
	bic	x0, x0, #HCR_VM
	msr	hcr_el2, x0
	isb
	tlbi	vmalls12e1
	dsb	nsh
#endif

	/* Load the panic arguments into x0-7 */
	mrs	x0, esr_el2
	mov	x4, x3
	mov	x3, x2
	hyp_pa	x3, x6
	get_vcpu_ptr x5, x6
	mrs	x6, far_el2
	mrs	x7, hpfar_el2
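
	/*
	 * Note: x0-x7 are assumed to line up with the C prototype
	 * nvhe_hyp_panic_handler(esr, spsr, elr_virt, elr_phys, par,
	 * vcpu, far, hpfar) declared on the kernel side.
	 */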

	/* Enter the host, conditionally restoring the host context. */
	cbz	x29, __host_enter_without_restoring
	b	__host_enter_for_panic
SYM_FUNC_END(__hyp_do_panic)

SYM_FUNC_START(__host_hvc)
	ldp	x0, x1, [sp]		// Don't fixup the stack yet

	/* No stub for you, sonny Jim */
alternative_if ARM64_KVM_PROTECTED_MODE
	b	__host_exit
alternative_else_nop_endif

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	__host_exit

	add	sp, sp, #16
	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
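	/*
	 * Note: kimg_pa below is expected to turn the kernel VA into its
	 * physical address (kernel VA minus kimage_voffset), which is the
	 * same address the idmap maps, so the branch lands in the stub.
	 */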
	ldr	x5, =__kvm_handle_stub_hvc
	kimg_pa	x5, x6
	br	x5
SYM_FUNC_END(__host_hvc)

.macro host_el1_sync_vect
	.align 7
.L__vect_start\@:
	stp	x0, x1, [sp, #-16]!
	mrs	x0, esr_el2
	ubfx	x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
	cmp	x0, #ESR_ELx_EC_HVC64
	b.eq	__host_hvc
	b	__host_exit
.L__vect_end\@:
.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
	.error "host_el1_sync_vect larger than vector entry"
.endif
.endm
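
/*
 * Note: each entry in the EL2 vector table is 0x80 bytes (32
 * instructions), which is what the size check above enforces. The EC
 * field of ESR_EL2 identifies HVC64 exits, which may be stub calls;
 * everything else goes straight to __host_exit.
 */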

.macro invalid_host_el2_vect
	.align 7
	/* If a guest is loaded, panic out of it. */
	stp	x0, x1, [sp, #-16]!
	get_loaded_vcpu x0, x1
	cbnz	x0, __guest_exit_panic
	add	sp, sp, #16

	/*
	 * The panic may not be clean if the exception is taken before the host
	 * context has been saved by __host_exit or after the hyp context has
	 * been partially clobbered by __host_enter.
	 */
	b	hyp_panic
.endm

.macro invalid_host_el1_vect
	.align 7
	mov	x0, xzr		/* restore_host = false */
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, par_el1
	b	__hyp_panic
.endm

/*
 * The host vector does not use an ESB instruction in order to avoid consuming
 * SErrors that should only be consumed by the host. Guest entry is deferred by
 * __guest_enter if there are any pending asynchronous exceptions so hyp will
 * always return to the host without having consumed host SErrors.
 *
 * CONFIG_KVM_INDIRECT_VECTORS is not applied to the host vectors because the
 * host knows about the EL2 vectors already, and there is no point in hiding
 * them.
 */
	.align 11
SYM_CODE_START(__kvm_hyp_host_vector)
	invalid_host_el2_vect			// Synchronous EL2t
	invalid_host_el2_vect			// IRQ EL2t
	invalid_host_el2_vect			// FIQ EL2t
	invalid_host_el2_vect			// Error EL2t

	invalid_host_el2_vect			// Synchronous EL2h
	invalid_host_el2_vect			// IRQ EL2h
	invalid_host_el2_vect			// FIQ EL2h
	invalid_host_el2_vect			// Error EL2h

	host_el1_sync_vect			// Synchronous 64-bit EL1
	invalid_host_el1_vect			// IRQ 64-bit EL1
	invalid_host_el1_vect			// FIQ 64-bit EL1
	invalid_host_el1_vect			// Error 64-bit EL1

	invalid_host_el1_vect			// Synchronous 32-bit EL1
	invalid_host_el1_vect			// IRQ 32-bit EL1
	invalid_host_el1_vect			// FIQ 32-bit EL1
	invalid_host_el1_vect			// Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_host_vector)

/*
 * Forward SMC with arguments in struct kvm_cpu_context, and
 * store the result into the same struct. Assumes SMCCC 1.2 or older.
 *
 * x0: struct kvm_cpu_context*
 */
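/*
 * Note: under SMCCC v1.2 a call may pass arguments and return results in
 * x0-x17, hence the full range is copied in and out around the SMC. This
 * path is used, e.g., to proxy the host's SMCs (such as PSCI calls) from
 * hyp/nvhe/hyp-main.c through to EL3.
 */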
SYM_CODE_START(__kvm_hyp_host_forward_smc)
	/*
	 * Use x18 to keep the pointer to the host context because
	 * x18 is callee-saved in SMCCC but not in AAPCS64.
	 */
	mov	x18, x0

	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	smc	#0

	stp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	stp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	stp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	ret
SYM_CODE_END(__kvm_hyp_host_forward_smc)