2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #include <linux/linkage.h>
20 #include <asm/alternative.h>
21 #include <asm/asm-offsets.h>
22 #include <asm/assembler.h>
23 #include <asm/cpufeature.h>
24 #include <asm/debug-monitors.h>
26 #include <asm/fpsimdmacros.h>
28 #include <asm/kvm_arm.h>
29 #include <asm/kvm_asm.h>
30 #include <asm/kvm_mmu.h>
31 #include <asm/memory.h>
// Byte offsets into the CPU context structure. GP regs, SPSRs and sysregs
// are all 8-byte slots (hence the 8*x scaling); the base constants come
// from asm-offsets (see the asm-offsets.h include above).
33 #define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x)
34 #define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
35 #define CPU_SPSR_OFFSET(x) CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
36 #define CPU_SYSREG_OFFSET(x) (CPU_SYSREGS + 8*x)
// Everything below runs at EL2 and must live in the hyp text section.
39 .pushsection .hyp.text, "ax"
42 .macro save_common_regs
43 // x2: base address for cpu context
// Save the callee-saved GPRs into the context, based at the x19 slot.
46 add x3, x2, #CPU_XREG_OFFSET(19)
48 stp x21, x22, [x3, #16]
49 stp x23, x24, [x3, #32]
50 stp x25, x26, [x3, #48]
51 stp x27, x28, [x3, #64]
52 stp x29, lr, [x3, #80]
// ELR/SPSR_EL2 hold the PC/PSTATE of the context interrupted by the
// exception that brought us to EL2.
55 mrs x20, elr_el2 // pc before entering el2
56 mrs x21, spsr_el2 // pstate before entering el2
58 stp x19, x20, [x3, #96]
// NOTE(review): x22-x24 are assumed to have been loaded from SP_EL1,
// ELR_EL1 and SPSR_EL1 just above (mrs lines not visible here) — confirm
// against the full source.
65 str x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
66 str x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
67 str x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
70 .macro restore_common_regs
71 // x2: base address for cpu context
// Reload the saved EL1 stack pointer, return address and program status;
// the msr writes presumably follow (not visible here).
74 ldr x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
75 ldr x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
76 ldr x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
82 add x3, x2, #CPU_XREG_OFFSET(31) // SP_EL0
// Program the EL2 return state: the eventual eret resumes at this
// PC with this PSTATE.
87 msr elr_el2, x20 // pc on return from el2
88 msr spsr_el2, x21 // pstate on return from el2
// Restore the callee-saved GPRs, mirroring save_common_regs.
90 add x3, x2, #CPU_XREG_OFFSET(19)
92 ldp x21, x22, [x3, #16]
93 ldp x23, x24, [x3, #32]
94 ldp x25, x26, [x3, #48]
95 ldp x27, x28, [x3, #64]
96 ldp x29, lr, [x3, #80]
// Host register restore plus FP/SIMD save/restore helpers.
103 .macro restore_host_regs
// NOTE(review): the macro bodies between here and restore_fpsimd are not
// visible in this view; the CPU_FP_REGS computation below appears to
// belong to an fpsimd save macro — confirm against the full source.
108 // x2: cpu context address
// Base of the FP/SIMD register area inside the CPU context.
110 add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
114 .macro restore_fpsimd
115 // x2: cpu context address
117 add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
121 .macro save_guest_regs
122 // x0 is the vcpu address
123 // x1 is the return code, do not corrupt!
124 // x2 is the cpu context
125 // x3 is a tmp register
126 // Guest's x0-x3 are on the stack
128 // Compute base to save registers
// Save guest x4-x17 straight from the live registers; the x4/x5 pair
// sits at offset 0 from this base.
129 add x3, x2, #CPU_XREG_OFFSET(4)
131 stp x6, x7, [x3, #16]
132 stp x8, x9, [x3, #32]
133 stp x10, x11, [x3, #48]
134 stp x12, x13, [x3, #64]
135 stp x14, x15, [x3, #80]
136 stp x16, x17, [x3, #96]
// Now store the guest's x0-x3, which were pushed on the stack at entry.
// NOTE(review): the pops recovering them into x4-x7 are not visible
// here — confirm.
142 add x3, x2, #CPU_XREG_OFFSET(0)
144 stp x6, x7, [x3, #16]
149 .macro restore_guest_regs
150 // x0 is the vcpu address.
151 // x2 is the cpu context
152 // x3 is a tmp register
154 // Prepare x0-x3 for later restore
// The guest's x0-x3 are staged through x4-x7 and parked on the stack, so
// x0-x3 remain usable as scratch for the rest of the restore sequence.
155 add x3, x2, #CPU_XREG_OFFSET(0)
157 ldp x6, x7, [x3, #16]
158 push x4, x5 // Push x0-x3 on the stack
// x4-x17 come straight from their context slots (x4 lives at byte
// offset 32 because the base above is the x0 slot).
162 ldp x4, x5, [x3, #32]
163 ldp x6, x7, [x3, #48]
164 ldp x8, x9, [x3, #64]
165 ldp x10, x11, [x3, #80]
166 ldp x12, x13, [x3, #96]
167 ldp x14, x15, [x3, #112]
168 ldp x16, x17, [x3, #128]
171 // x19-x29, lr, sp*, elr*, spsr*
174 // Last bits of the 64bit state
178 // Do not touch any register after this!
182 * Macros to perform system register save/restore.
184 * Ordering here is absolutely critical, and must be kept consistent
185 * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
188 * In other words, don't touch any of these unless you know what you're doing.
192 // x2: base address for cpu context
// Save the EL1 system registers. The stp slot order below must match the
// ldp order in restore_sysregs exactly — see the ordering warning above.
// NOTE(review): the mrs sequence filling x4-x25 is only partially visible
// here (contextidr_el1 -> x18); confirm the full read list.
195 add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)
211 mrs x18, contextidr_el1
221 stp x6, x7, [x3, #16]
222 stp x8, x9, [x3, #32]
223 stp x10, x11, [x3, #48]
224 stp x12, x13, [x3, #64]
225 stp x14, x15, [x3, #80]
226 stp x16, x17, [x3, #96]
227 stp x18, x19, [x3, #112]
228 stp x20, x21, [x3, #128]
229 stp x22, x23, [x3, #144]
230 stp x24, x25, [x3, #160]
233 .macro save_debug type
234 // x4: pointer to register set
235 // x5: number of registers to skip
// Computed-branch skip: advance the branch target in x22 by one 4-byte
// instruction per unimplemented register, so only the \type registers
// that exist on this CPU are read. NOTE(review): the adr/br pairing
// around x22 is not visible here — confirm x22 holds a code address.
239 add x22, x22, x5, lsl #2
// Read debug registers \type15..\type0 into x21..x6 (descending so the
// skip branch lands mid-sequence).
242 mrs x21, \type\()15_el1
243 mrs x20, \type\()14_el1
244 mrs x19, \type\()13_el1
245 mrs x18, \type\()12_el1
246 mrs x17, \type\()11_el1
247 mrs x16, \type\()10_el1
248 mrs x15, \type\()9_el1
249 mrs x14, \type\()8_el1
250 mrs x13, \type\()7_el1
251 mrs x12, \type\()6_el1
252 mrs x11, \type\()5_el1
253 mrs x10, \type\()4_el1
254 mrs x9, \type\()3_el1
255 mrs x8, \type\()2_el1
256 mrs x7, \type\()1_el1
257 mrs x6, \type\()0_el1
// Same skip logic for the store side.
260 add x22, x22, x5, lsl #2
// Store into the array at x4: register n at byte offset n*8.
263 str x21, [x4, #(15 * 8)]
264 str x20, [x4, #(14 * 8)]
265 str x19, [x4, #(13 * 8)]
266 str x18, [x4, #(12 * 8)]
267 str x17, [x4, #(11 * 8)]
268 str x16, [x4, #(10 * 8)]
269 str x15, [x4, #(9 * 8)]
270 str x14, [x4, #(8 * 8)]
271 str x13, [x4, #(7 * 8)]
272 str x12, [x4, #(6 * 8)]
273 str x11, [x4, #(5 * 8)]
274 str x10, [x4, #(4 * 8)]
275 str x9, [x4, #(3 * 8)]
276 str x8, [x4, #(2 * 8)]
277 str x7, [x4, #(1 * 8)]
278 str x6, [x4, #(0 * 8)]
281 .macro restore_sysregs
282 // x2: base address for cpu context
// Reload the saved EL1 system registers. The ldp slot order mirrors the
// stp order in save_sysregs and must never be changed independently —
// see the ordering warning above.
285 add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)
288 ldp x6, x7, [x3, #16]
289 ldp x8, x9, [x3, #32]
290 ldp x10, x11, [x3, #48]
291 ldp x12, x13, [x3, #64]
292 ldp x14, x15, [x3, #80]
293 ldp x16, x17, [x3, #96]
294 ldp x18, x19, [x3, #112]
295 ldp x20, x21, [x3, #128]
296 ldp x22, x23, [x3, #144]
297 ldp x24, x25, [x3, #160]
// Write the values back to the hardware registers.
// NOTE(review): only the contextidr_el1 write is visible here; the rest
// of the msr sequence is assumed to surround it — confirm.
313 msr contextidr_el1, x18
323 .macro restore_debug type
324 // x4: pointer to register set
325 // x5: number of registers to skip
// Computed-branch skip, mirroring save_debug: x22 is advanced by one
// 4-byte instruction per unimplemented register. NOTE(review): the
// adr/br pairing around x22 is not visible here — confirm.
329 add x22, x22, x5, lsl #2
// Load saved values: array slot n at byte offset n*8.
332 ldr x21, [x4, #(15 * 8)]
333 ldr x20, [x4, #(14 * 8)]
334 ldr x19, [x4, #(13 * 8)]
335 ldr x18, [x4, #(12 * 8)]
336 ldr x17, [x4, #(11 * 8)]
337 ldr x16, [x4, #(10 * 8)]
338 ldr x15, [x4, #(9 * 8)]
339 ldr x14, [x4, #(8 * 8)]
340 ldr x13, [x4, #(7 * 8)]
341 ldr x12, [x4, #(6 * 8)]
342 ldr x11, [x4, #(5 * 8)]
343 ldr x10, [x4, #(4 * 8)]
344 ldr x9, [x4, #(3 * 8)]
345 ldr x8, [x4, #(2 * 8)]
346 ldr x7, [x4, #(1 * 8)]
347 ldr x6, [x4, #(0 * 8)]
// Same skip logic for the hardware-write side.
350 add x22, x22, x5, lsl #2
// Write registers \type15..\type0 from x21..x6.
353 msr \type\()15_el1, x21
354 msr \type\()14_el1, x20
355 msr \type\()13_el1, x19
356 msr \type\()12_el1, x18
357 msr \type\()11_el1, x17
358 msr \type\()10_el1, x16
359 msr \type\()9_el1, x15
360 msr \type\()8_el1, x14
361 msr \type\()7_el1, x13
362 msr \type\()6_el1, x12
363 msr \type\()5_el1, x11
364 msr \type\()4_el1, x10
365 msr \type\()3_el1, x9
366 msr \type\()2_el1, x8
367 msr \type\()1_el1, x7
368 msr \type\()0_el1, x6
371 .macro skip_32bit_state tmp, target
372 // Skip 32bit state if not needed
// HCR_EL2.RW set means an AArch64 guest, so there is no AArch32 banked
// state to touch. NOTE(review): \tmp is assumed to hold HCR_EL2 (the mrs
// is not visible here) — confirm.
374 tbnz \tmp, #HCR_RW_SHIFT, \target
377 .macro skip_tee_state tmp, target
378 // Skip ThumbEE state if not needed
379 mrs \tmp, id_pfr0_el1
// Bit 12 is the bottom bit of the ID_PFR0 ThumbEE field; zero means the
// CPU has no ThumbEE registers to save/restore.
380 tbz \tmp, #12, \target
383 .macro skip_debug_state tmp, target
// Branch to \target unless the vcpu's debug-dirty flag is set, i.e. a
// full debug register save/restore cycle is pending.
384 ldr \tmp, [x0, #VCPU_DEBUG_FLAGS]
385 tbz \tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
389 * Branch to target if CPTR_EL2.TFP bit is set (VFP/SIMD trapping enabled)
391 .macro skip_fpsimd_state tmp, target
// NOTE(review): \tmp is assumed to hold CPTR_EL2 (the mrs is not visible
// here) — confirm. TFP set means FP/SIMD state was never touched.
393 tbnz \tmp, #CPTR_EL2_TFP_SHIFT, \target
396 .macro compute_debug_state target
397 // Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
398 // is set, we do a full save/restore cycle and disable trapping.
399 add x25, x0, #VCPU_CONTEXT
401 // Check the state of MDSCR_EL1
402 ldr x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)]
403 and x26, x25, #DBG_MDSCR_KDE
404 and x25, x25, #DBG_MDSCR_MDE
// Neither KDE nor MDE set: fall back to the flags check below.
406 b.eq 9998f // Nothing to see there
408 // If any interesting bits were set, we must set the flag
409 mov x26, #KVM_ARM64_DEBUG_DIRTY
410 str x26, [x0, #VCPU_DEBUG_FLAGS]
411 b 9999f // Don't skip restore
414 // Otherwise load the flags from memory in case we recently
416 skip_debug_state x25, \target
420 .macro save_guest_32bit_state
// Nothing to do for an AArch64 guest.
421 skip_32bit_state x3, 1f
// Banked 32-bit SPSRs, starting at the ABT slot.
423 add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
429 stp x6, x7, [x3, #16]
// 32-bit-only system registers (DACR32 etc.).
431 add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
// Skip the FPEXC32 save while FP/SIMD accesses trap (state untouched).
// NOTE(review): the register saved under this guard is not visible here.
436 skip_fpsimd_state x8, 3f
440 skip_debug_state x8, 2f
// ThumbEE registers only exist on CPUs that implement ThumbEE.
444 skip_tee_state x8, 1f
446 add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
453 .macro restore_guest_32bit_state
// Nothing to do for an AArch64 guest.
454 skip_32bit_state x3, 1f
// Banked 32-bit SPSRs, mirroring save_guest_32bit_state.
456 add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
458 ldp x6, x7, [x3, #16]
// 32-bit-only system registers (DACR32 etc.).
464 add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
469 skip_debug_state x8, 2f
// ThumbEE registers only exist on CPUs that implement ThumbEE.
473 skip_tee_state x8, 1f
475 add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
// Configure EL2 trap controls for guest entry (HCR, CPTR, HSTR, MDCR).
482 .macro activate_traps
483 ldr x2, [x0, #VCPU_HCR_EL2]
486 * We are about to set CPTR_EL2.TFP to trap all floating point
487 * register accesses to EL2, however, the ARM ARM clearly states that
488 * traps are only taken to EL2 if the operation would not otherwise
489 * trap to EL1. Therefore, always make sure that for 32-bit guests,
490 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
492 tbnz x2, #HCR_RW_SHIFT, 99f // open code skip_32bit_state
// Trap trace (TTA) and FP/SIMD (TFP) accesses to EL2; the FP trap is
// what makes the lazy FP/SIMD switch possible.
498 mov x2, #CPTR_EL2_TTA
499 orr x2, x2, #CPTR_EL2_TFP
// Trap guest accesses to the CP15 c15 (implementation-defined) space.
502 mov x2, #(1 << 15) // Trap CP15 Cr=15
505 // Monitor Debug Config - see kvm_arm_setup_debug()
506 ldr x2, [x0, #VCPU_MDCR_EL2]
510 .macro deactivate_traps
// Back to host configuration: keep only the HPMN field in MDCR_EL2.
516 and x2, x2, #MDCR_EL2_HPMN_MASK
// NOTE(review): the VCPU_KVM / KVM_VTTBR loads below look like part of a
// separate VM-activation macro (programming VTTBR_EL2); the macro
// boundary is not visible here — confirm against the full source.
521 ldr x1, [x0, #VCPU_KVM]
523 ldr x2, [x1, #KVM_VTTBR]
532 * Call into the vgic backend for state saving
534 .macro save_vgic_state
// Patched at boot: GICv2 save routine by default, GICv3 when the CPU
// exposes the system-register GIC interface.
535 alternative_insn "bl __save_vgic_v2_state", "bl __save_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
// Clear the HCR interrupt-override bits now that the guest is out.
537 mov x25, #HCR_INT_OVERRIDE
544 * Call into the vgic backend for state restoring
546 .macro restore_vgic_state
548 ldr x25, [x0, #VCPU_IRQ_LINES]
549 orr x24, x24, #HCR_INT_OVERRIDE
552 alternative_insn "bl __restore_vgic_v2_state", "bl __restore_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
555 .macro save_timer_state
// x0: vcpu pointer. Only save if the in-kernel virtual timer is enabled.
557 ldr x2, [x0, #VCPU_KVM]
559 ldr w3, [x2, #KVM_TIMER_ENABLED]
// Save CNTV_CTL, then clear its enable bit so the virtual timer stops
// firing while the host runs.
564 str w3, [x0, #VCPU_TIMER_CNTV_CTL]
565 bic x3, x3, #1 // Clear Enable
570 mrs x3, cntv_cval_el0
571 str x3, [x0, #VCPU_TIMER_CNTV_CVAL]
574 // Allow physical timer/counter access for the host
579 // Clear cntvoff for the host
583 .macro restore_timer_state
585 // Disallow physical timer access for the guest
586 // Physical counter access is allowed
// Only reload timer state if the in-kernel virtual timer is enabled.
592 ldr x2, [x0, #VCPU_KVM]
594 ldr w3, [x2, #KVM_TIMER_ENABLED]
// Restore the guest's virtual counter offset, compare value and control.
597 ldr x3, [x2, #KVM_TIMER_CNTVOFF]
599 ldr x2, [x0, #VCPU_TIMER_CNTV_CVAL]
600 msr cntv_cval_el0, x2
603 ldr w2, [x0, #VCPU_TIMER_CNTV_CTL]
617 /* Save debug state */
619 // x2: ptr to CPU context
620 // x3: ptr to debug reg struct
621 // x4/x5/x6-22/x24-26: trashed
// ID_AA64DFR0 encodes (count - 1) for breakpoints in bits [15:12] and
// watchpoints in bits [23:20].
623 mrs x26, id_aa64dfr0_el1
624 ubfx x24, x26, #12, #4 // Extract BRPs
625 ubfx x25, x26, #20, #4 // Extract WRPs
// Turn the counts into "registers to skip" out of the 16 possible.
// NOTE(review): w26 is assumed to hold 15 at this point (the mov is not
// visible here) — confirm.
627 sub w24, w26, w24 // How many BPs to skip
628 sub w25, w26, w25 // How many WPs to skip
// Save each bank (BCR/BVR/WCR/WVR) via the save_debug macro.
631 add x4, x3, #DEBUG_BCR
633 add x4, x3, #DEBUG_BVR
637 add x4, x3, #DEBUG_WCR
639 add x4, x3, #DEBUG_WVR
// MDCCINT is a single register, saved directly into the context.
643 str x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
646 /* Restore debug state */
648 // x2: ptr to CPU context
649 // x3: ptr to debug reg struct
650 // x4/x5/x6-22/x24-26: trashed
// Mirror of the save path: recompute skip counts from ID_AA64DFR0.
652 mrs x26, id_aa64dfr0_el1
653 ubfx x24, x26, #12, #4 // Extract BRPs
654 ubfx x25, x26, #20, #4 // Extract WRPs
// NOTE(review): w26 is assumed to hold 15 here (mov not visible) — confirm.
656 sub w24, w26, w24 // How many BPs to skip
657 sub w25, w26, w25 // How many WPs to skip
// Restore each bank (BCR/BVR/WCR/WVR) via the restore_debug macro.
660 add x4, x3, #DEBUG_BCR
662 add x4, x3, #DEBUG_BVR
666 add x4, x3, #DEBUG_WCR
668 add x4, x3, #DEBUG_WVR
// MDCCINT reloaded directly from the context.
671 ldr x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
// Lazy FP/SIMD switch: FP state is only swapped on the guest's first
// FP/SIMD access (the CPTR_EL2.TFP trap lands here via el1_trap).
677 skip_fpsimd_state x3, 1f
682 skip_fpsimd_state x3, 1f
686 switch_to_guest_fpsimd:
// Stop trapping FP/SIMD so the guest can use it from now on.
690 bic x2, x2, #CPTR_EL2_TFP
// Save the host's FP state, then restore the guest's.
696 ldr x2, [x0, #VCPU_HOST_CONTEXT]
700 add x2, x0, #VCPU_CONTEXT
// 32-bit guests additionally need FPEXC32 reloaded.
703 skip_32bit_state x3, 1f
704 ldr x4, [x2, #CPU_SYSREG_OFFSET(FPEXC32_EL2)]
714 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
716 * This is the world switch. The first half of the function
717 * deals with entering the guest, and anything from __kvm_vcpu_return
718 * to the end of the function deals with reentering the host.
719 * On the enter path, only x0 (vcpu pointer) must be preserved until
720 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
721 * code) must both be preserved until the epilogue.
722 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
724 ENTRY(__kvm_vcpu_run)
// Stash the vcpu pointer where trap handlers can find it (tpidr_el2).
726 msr tpidr_el2, x0 // Save the vcpu register
// Save host state first, using the host context.
729 ldr x2, [x0, #VCPU_HOST_CONTEXT]
// Decide whether this run needs a full debug save/restore cycle; if so,
// save the host debug registers too.
735 compute_debug_state 1f
736 add x3, x0, #VCPU_HOST_DEBUG_STATE
// Switch to the guest context and load guest state.
746 add x2, x0, #VCPU_CONTEXT
750 skip_debug_state x3, 1f
751 ldr x3, [x0, #VCPU_DEBUG_PTR]
755 restore_guest_32bit_state
758 // That's it, no more messing around.
// --- exit path (__kvm_vcpu_return) ---
762 // Assume x0 is the vcpu pointer, x1 the return code
763 // Guest's x0-x3 are on the stack
// Save guest state back into the guest context.
766 add x2, x0, #VCPU_CONTEXT
772 skip_debug_state x3, 1f
773 ldr x3, [x0, #VCPU_DEBUG_PTR]
777 save_guest_32bit_state
// Switch back to the host context and restore host state.
786 ldr x2, [x0, #VCPU_HOST_CONTEXT]
791 /* Clear FPSIMD and Trace trapping */
794 skip_debug_state x3, 1f
795 // Clear the dirty flag for the next run, as all the state has
796 // already been saved. Note that we nuke the whole 64bit word.
797 // If we ever add more flags, we'll have to be more careful...
798 str xzr, [x0, #VCPU_DEBUG_FLAGS]
799 add x3, x0, #VCPU_HOST_DEBUG_STATE
808 // void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
809 ENTRY(__kvm_tlb_flush_vmid_ipa)
// Load this VM's VTTBR so the TLB operations are scoped to its VMID.
813 ldr x2, [x0, #KVM_VTTBR]
818 * We could do so much better if we had the VA as well.
819 * Instead, we invalidate Stage-2 for this IPA, and the
820 * whole of Stage-1. Weep...
825 * We have to ensure completion of the invalidation at Stage-2,
826 * since a table walk on another CPU could refill a TLB with a
827 * complete (S1 + S2) walk based on the old Stage-2 mapping if
828 * the Stage-1 invalidation happened first.
837 ENDPROC(__kvm_tlb_flush_vmid_ipa)
840 * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
841 * @struct kvm *kvm - pointer to kvm structure
843 * Invalidates all Stage 1 and 2 TLB entries for current VMID.
845 ENTRY(__kvm_tlb_flush_vmid)
// Load this VM's VTTBR so the flush is scoped to its VMID.
849 ldr x2, [x0, #KVM_VTTBR]
859 ENDPROC(__kvm_tlb_flush_vmid)
// Flush all VM contexts: body not visible in this view.
861 ENTRY(__kvm_flush_vm_context)
867 ENDPROC(__kvm_flush_vm_context)
870 // Guess the context by looking at VTTBR:
871 // If zero, then we're already a host.
872 // Otherwise restore a minimal host context before panicing.
// If VTTBR was non-zero, the host context is restored from the vcpu
// (found via tpidr_el2 in the surrounding code, not visible here).
881 ldr x2, [x0, #VCPU_HOST_CONTEXT]
// Build the panic message arguments and drop back to the host to print.
886 1: adr x0, __hyp_panic_str
// PSTATE for the return: all interrupts/aborts masked.
899 mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
907 2: .quad HYP_PAGE_OFFSET
909 ENDPROC(__kvm_hyp_panic)
// Format string consumed by the host-side panic printing.
912 .ascii "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"
917 * u64 kvm_call_hyp(void *hypfn, ...);
919 * This is not really a variadic function in the classic C-way and care must
920 * be taken when calling this to ensure parameters are passed in registers
921 * only, since the stack will change between the caller and the callee.
923 * Call the function with the first argument containing a pointer to the
924 * function you wish to call in Hyp mode, and subsequent arguments will be
925 * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
926 * function pointer can be passed). The function being called must be mapped
927 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
928 * passed in r0 and r1.
930 * A function pointer with a value of 0 has a special meaning, and is
931 * used to implement __hyp_get_vectors in the same way as in
932 * arch/arm64/kernel/hyp_stub.S.
937 ENDPROC(kvm_call_hyp)
// Stub generator for exception-vector entries that must never fire; each
// instance defines \label and diverts to \target (here: __kvm_hyp_panic).
939 .macro invalid_vector label, target
946 /* None of these should ever happen */
947 invalid_vector el2t_sync_invalid, __kvm_hyp_panic
948 invalid_vector el2t_irq_invalid, __kvm_hyp_panic
949 invalid_vector el2t_fiq_invalid, __kvm_hyp_panic
950 invalid_vector el2t_error_invalid, __kvm_hyp_panic
951 invalid_vector el2h_sync_invalid, __kvm_hyp_panic
952 invalid_vector el2h_irq_invalid, __kvm_hyp_panic
953 invalid_vector el2h_fiq_invalid, __kvm_hyp_panic
954 invalid_vector el2h_error_invalid, __kvm_hyp_panic
955 invalid_vector el1_sync_invalid, __kvm_hyp_panic
956 invalid_vector el1_irq_invalid, __kvm_hyp_panic
957 invalid_vector el1_fiq_invalid, __kvm_hyp_panic
958 invalid_vector el1_error_invalid, __kvm_hyp_panic
960 el1_sync: // Guest trapped into EL2
// Classify the trap via the ESR exception class.
965 lsr x2, x1, #ESR_ELx_EC_SHIFT
967 cmp x2, #ESR_ELx_EC_HVC64
// A live VTTBR means a guest is running, so an HVC came from the guest;
// otherwise the host itself issued the hypercall.
970 mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest
971 cbnz x3, el1_trap // called HVC
973 /* Here, we're pretty sure the host called HVC. */
977 /* Check for __hyp_get_vectors */
985 * Compute the function address in EL2, and shuffle the parameters.
// --- el1_trap: guest trap dispatch ---
1003 /* Guest accessed VFP/SIMD registers, save host, restore Guest */
1004 cmp x2, #ESR_ELx_EC_FP_ASIMD
1005 b.eq switch_to_guest_fpsimd
// Data/instruction aborts need FAR/HPFAR captured for the fault handler.
1007 cmp x2, #ESR_ELx_EC_DABT_LOW
1008 mov x0, #ESR_ELx_EC_IABT_LOW
1010 b.ne 1f // Not an abort we care about
1012 /* This is an abort. Check for permission fault */
1013 and x2, x1, #ESR_ELx_FSC_TYPE
1015 b.ne 1f // Not a permission fault
1018 * Check for Stage-1 page table walk, which is guaranteed
1019 * to give a valid HPFAR_EL2.
1021 tbnz x1, #7, 1f // S1PTW is set
1023 /* Preserve PAR_EL1 */
1028 * Permission fault, HPFAR_EL2 is invalid.
1029 * Resolve the IPA the hard way using the guest VA.
1030 * Stage-1 translation already validated the memory access rights.
1031 * As such, we can use the EL1 translation regime, and don't have
1032 * to distinguish between EL0 and EL1 access.
1040 pop x0, xzr // Restore PAR_EL1 from the stack
1042 tbnz x3, #0, 3f // Bail out if we failed the translation
// PAR reports the PA at [47:12]; reshape it into HPFAR's IPA[47:4] format.
1043 ubfx x3, x3, #12, #36 // Extract IPA
1044 lsl x3, x3, #4 // and present it like HPFAR
// Hand the fault details to the C world via the vcpu.
1047 1: mrs x3, hpfar_el2
1050 2: mrs x0, tpidr_el2
1051 str w1, [x0, #VCPU_ESR_EL2]
1052 str x2, [x0, #VCPU_FAR_EL2]
1053 str x3, [x0, #VCPU_HPFAR_EL2]
1055 mov x1, #ARM_EXCEPTION_TRAP
1059 * Translation failed. Just return to the guest and
1060 * let it fault again. Another CPU is probably playing
// --- el1_irq ---
1072 mov x1, #ARM_EXCEPTION_IRQ
// EL2 exception vector table: 4 entries per source (sync/IRQ/FIQ/error)
// for EL2t, EL2h, 64-bit EL1 and 32-bit EL1 respectively. Only guest
// sync and IRQ are serviced; everything else panics via invalid_vector.
1079 ENTRY(__kvm_hyp_vector)
1080 ventry el2t_sync_invalid // Synchronous EL2t
1081 ventry el2t_irq_invalid // IRQ EL2t
1082 ventry el2t_fiq_invalid // FIQ EL2t
1083 ventry el2t_error_invalid // Error EL2t
1085 ventry el2h_sync_invalid // Synchronous EL2h
1086 ventry el2h_irq_invalid // IRQ EL2h
1087 ventry el2h_fiq_invalid // FIQ EL2h
1088 ventry el2h_error_invalid // Error EL2h
1090 ventry el1_sync // Synchronous 64-bit EL1
1091 ventry el1_irq // IRQ 64-bit EL1
1092 ventry el1_fiq_invalid // FIQ 64-bit EL1
1093 ventry el1_error_invalid // Error 64-bit EL1
1095 ventry el1_sync // Synchronous 32-bit EL1
1096 ventry el1_irq // IRQ 32-bit EL1
1097 ventry el1_fiq_invalid // FIQ 32-bit EL1
1098 ventry el1_error_invalid // Error 32-bit EL1
1099 ENDPROC(__kvm_hyp_vector)
// Return the current MDCR_EL2 value (body not visible in this view).
1102 ENTRY(__kvm_get_mdcr_el2)
1105 ENDPROC(__kvm_get_mdcr_el2)