/*
 * Copyright (c) 2015, Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <asm.S>
#include <arm64.h>
#include <arm64_macros.S>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed_macros.h>
#include <sm/teesmc_opteed.h>
#include <asm-defines.h>
#include <kernel/thread_defs.h>
#include "thread_private.h"
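/*
 * get_thread_ctx computes a pointer to the struct thread_ctx of the
 * thread currently running on this core, roughly (a sketch, assuming
 * "threads" is the global array of thread contexts):
 *
 *	res = &threads[core_local->curr_thread];
 *
 * tmp0 and tmp1 name scratch registers and must differ from res.
 */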
.macro get_thread_ctx core_local, res, tmp0, tmp1
	ldr	w\tmp0, [\core_local, \
			#THREAD_CORE_LOCAL_CURR_THREAD]
	adr	x\res, threads
	mov	x\tmp1, #THREAD_CTX_SIZE
	madd	x\res, x\tmp0, x\tmp1, x\res
.endm
.section .text.thread_asm
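/*
 * The vector_*_entry functions below are entered from the secure monitor
 * with the SMC arguments in x0-x7. Each of them finishes by issuing an
 * SMC with a TEESMC_OPTEED_RETURN_* code in x0 to hand control back to
 * the monitor; execution is never expected to continue past that SMC.
 */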
LOCAL_FUNC vector_std_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (error detected) it will do a normal "C"
	 * return.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_std_smc_entry
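/*
 * Layout assumed for the SMC argument area used above; this is only to
 * visualize it, the authoritative struct thread_smc_args definition
 * lives in the thread headers (field types here are an assumption).
 *
 * struct thread_smc_args {
 * 	uint64_t a0;	SMC function ID or return code
 * 	uint64_t a1;
 * 	uint64_t a2;
 * 	uint64_t a3;
 * 	uint64_t a4;
 * 	uint64_t a5;
 * 	uint64_t a6;
 * 	uint64_t a7;
 * };
 */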
LOCAL_FUNC vector_fast_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_fast_smc
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fast_smc_entry
LOCAL_FUNC vector_fiq_entry , :
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	adr	x16, thread_fiq_handler_ptr
	ldr	x16, [x16]
	blr	x16
	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fiq_entry
LOCAL_FUNC vector_cpu_on_entry , :
	adr	x16, thread_cpu_on_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_on_entry
LOCAL_FUNC vector_cpu_off_entry , :
	adr	x16, thread_cpu_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_off_entry
LOCAL_FUNC vector_cpu_suspend_entry , :
	adr	x16, thread_cpu_suspend_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_suspend_entry
LOCAL_FUNC vector_cpu_resume_entry , :
	adr	x16, thread_cpu_resume_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_resume_entry
LOCAL_FUNC vector_system_off_entry , :
	adr	x16, thread_system_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_off_entry
LOCAL_FUNC vector_system_reset_entry , :
	adr	x16, thread_system_reset_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_reset_entry
/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.
 *
 * Note that ARM-TF depends on the layout of this vector table, any change
 * in layout has to be synced with ARM-TF.
 */
FUNC thread_vector_table , :
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
END_FUNC thread_vector_table
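/*
 * Each entry is a single branch instruction, so the table is nine
 * consecutive 4-byte slots (std/fast SMC, cpu on/off, cpu
 * resume/suspend, fiq, system off/reset). ARM-TF presumably indexes the
 * table by these fixed offsets, which is why the order must not change
 * without a matching ARM-TF update.
 */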
/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3
	load_xregs x0, THREAD_CTX_REGS_X1, 1, 30
	ldr	x0, [x0, THREAD_CTX_REGS_X0]
	eret
END_FUNC thread_resume
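/*
 * thread_resume() never returns to its caller: it reloads sp, elr_el1
 * and spsr_el1 plus the full general purpose register file from the
 * given struct thread_ctx_regs and erets straight into the restored
 * context.
 */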
FUNC thread_std_smc_entry , :
	/* Pass x0-x7 in a struct thread_smc_args */
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	/* Call the registered handler */
	bl	__thread_std_smc_entry

	/*
	 * Load the returned x0-x3 into preserved registers and skip the
	 * "returned" x4-x7 since they will not be returned to normal
	 * world.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
	add	sp, sp, #THREAD_SMC_ARGS_SIZE

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	bl	thread_get_tmp_sp
	mov	sp, x0

	bl	thread_state_free

	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	x1, x20
	mov	x2, x21
	mov	x3, x22
	mov	x4, x23
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_std_smc_entry
/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
	/* Read daif and create an SPSR */
	mrs	x1, daif
	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	push	x0, xzr
	push	x1, x30
	bl	thread_get_ctx_regs
	ldr	x30, [sp, #8]
	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
	mov	x19, x0

	bl	thread_get_tmp_sp
	pop	x1, xzr		/* Match "push x1, x30" above */
	mov	x2, sp
	str	x2, [x19, #THREAD_CTX_REGS_SP]
	ldr	x20, [sp]	/* Get pointer to rv[] */
	mov	sp, x0		/* Switch to tmp stack */

	adr	x2, .thread_rpc_return
	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	bl	thread_state_suspend
	mov	x4, x0		/* Supply thread index */
	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	load_wregs x20, 0, 1, 3	/* Load rv[] into w1-w3 */
	smc	#0
	b	.	/* SMC should not return */
	/*
	 * At this point the stack pointer has been restored to the value
	 * stored in THREAD_CTX above.
	 *
	 * Jumps here from thread_resume above when RPC has returned. The
	 * IRQ and FIQ bits are restored to what they were when this
	 * function was originally entered.
	 */
.thread_rpc_return:
	pop	x16, xzr	/* Get pointer to rv[] */
	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
	ret
END_FUNC thread_rpc
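/*
 * RPC round trip in brief: thread_rpc() saves the callee-saved registers
 * and stack pointer in the thread context, arranges for execution to
 * resume at .thread_rpc_return, suspends the thread and exits to the
 * normal world through an SMC. When the normal world later resumes the
 * thread, thread_resume() erets to .thread_rpc_return where the RPC
 * results in w0-w5 are copied out to the caller's rv[] buffer.
 */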
FUNC thread_init_vbar , :
	adr	x0, thread_vect_table
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar
/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *		unsigned long a2, unsigned long a3, unsigned long user_sp,
 *		unsigned long user_func, unsigned long spsr,
 *		uint32_t *exit_status0, uint32_t *exit_status1)
 */
FUNC __thread_enter_user_mode , :
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30

	/* Disable exceptions, save kern sp in x19, switch to SP_EL1 */
	msr	daifset, #DAIFBIT_ALL
	mov	x19, sp
	msr	spsel, #1

	/* Get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save kernel stack pointer to ensure that el0_svc() uses
	 * the correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP]

	/* Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode */
	msr	spsr_el1, x6
	/* Set user sp */
	mov	x13, x4		/* Used when running TA in AArch32 */
	msr	sp_el0, x4	/* Used when running TA in AArch64 */
	/* Set user function */
	msr	elr_el1, x5

	/* Jump into user mode */
	eret
END_FUNC __thread_enter_user_mode
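/*
 * The struct thread_user_mode_rec pushed on the kernel stack above is
 * consumed by thread_unwind_user_mode() below, so returning from user
 * mode looks to the caller like an ordinary return from
 * __thread_enter_user_mode().
 */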
/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
	str	w1, [x3]
	str	w2, [x4]
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode
/*
 * This macro verifies that a given vector doesn't exceed the
 * architectural limit of 32 instructions. It is meant to be placed
 * immediately after the last instruction in the vector and takes the
 * vector entry label as its parameter.
 */
.macro check_vector_size since
	.if (. - \since) > (32 * 4)
		.error "Vector exceeds 32 instructions"
	.endif
.endm
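/*
 * The 32 instruction limit follows from the architecture: AArch64
 * vector entries are spaced 0x80 bytes apart, giving room for 32 4-byte
 * instructions each, which is also why every entry below is aligned
 * with ".align 7" (2^7 = 0x80).
 */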
	.align	11
LOCAL_FUNC thread_vect_table , :
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7
sync_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	el1_sync_abort
	check_vector_size sync_el1_sp0

	.align	7
irq_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size irq_el1_sp0

	.align	7
fiq_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size fiq_el1_sp0

	.align	7
SErrorSP0:
	b	SErrorSP0
	check_vector_size SErrorSP0

	/* -----------------------------------------------------
	 * Current EL with SPx: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionSPx:
	b	SynchronousExceptionSPx
	check_vector_size SynchronousExceptionSPx

	.align	7
IrqSPx:
	b	IrqSPx
	check_vector_size IrqSPx

	.align	7
FiqSPx:
	b	FiqSPx
	check_vector_size FiqSPx

	.align	7
SErrorSPx:
	b	SErrorSPx
	check_vector_size SErrorSPx

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7
el0_sync_a64:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	mrs	x2, esr_el1
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a64

	.align	7
el0_irq_a64:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el0_irq_a64

	.align	7
el0_fiq_a64:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el0_fiq_a64

	.align	7
SErrorA64:
	b	SErrorA64
	check_vector_size SErrorA64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7
el0_sync_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	mrs	x2, esr_el1
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a32

	.align	7
el0_irq_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el0_irq_a32

	.align	7
el0_fiq_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el0_fiq_a32

	.align	7
SErrorA32:
	b	SErrorA32
	check_vector_size SErrorA32
END_FUNC thread_vect_table
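/*
 * Routing summary for thread_vect_table: SVCs from user mode (AArch64 or
 * AArch32) are directed to el0_svc, other synchronous exceptions from
 * lower ELs to el0_sync_abort, synchronous exceptions taken while running
 * on SP_EL0 at EL1 to el1_sync_abort, and IRQs/FIQs to elx_irq/elx_fiq.
 * The "Current EL with SPx" and SError entries spin in place since they
 * are not expected to happen.
 */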
LOCAL_FUNC el0_svc , :
	/* Get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x0

	/* Make room for struct thread_svc_regs */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	mov	x0, sp

	/*
	 * Unmask FIQ, SError, and debug exceptions since we have nothing
	 * left in sp_el1. Note that the SVC handler is expected to
	 * re-enable IRQs by itself.
	 */
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)

	/* Call the handler */
	bl	tee_svc_handler

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This is needed when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP]

	/* Restore registers to the required state and return */
	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
	mov	sp, x0
	ldr	x0, [x30, #THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	eret
END_FUNC el0_svc
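/*
 * el0_svc in brief: the kernel stack pointer saved in THREAD_CTX_KERN_SP
 * is picked up, a struct thread_svc_regs is built on it from the state
 * saved by the exception vector, the SVC handler is called with a pointer
 * to that struct, and on return elr_el1, spsr_el1, x0..x14, x30 and the
 * user stack pointer are restored from the (possibly updated) struct
 * before the eret back to user mode.
 */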
LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp		/* Save original sp */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
			.Lsel_tmp_sp

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* Select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/* Call the abort handler with the saved register state */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC el1_sync_abort
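/*
 * The flag manipulation at entry and exit implements a small per-core
 * stack of states: entry shifts the previous flags up by
 * THREAD_CLF_SAVED_SHIFT and ors in the new state bit, exit shifts them
 * back down. The tbnz above inspects the shifted copy to detect a nested
 * abort (an abort taken while already handling one) and in that case
 * selects the tmp stack instead of the abort stack.
 */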
LOCAL_FUNC el0_sync_abort , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */

	/* Load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	x3, sp		/* Save original sp_el0 */
	mov	sp, x1

	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/* Call the abort handler with the saved register state */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC el0_sync_abort
LOCAL_FUNC elx_irq , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	orr	w1, w1, #THREAD_CLF_IRQ
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP]
	/* Store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13

	/* Load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_IRQ
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend
	mov	w4, w0		/* Supply thread index */

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * functions expect to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	w1, =OPTEE_SMC_RETURN_RPC_IRQ
	mov	w2, #0
	mov	w3, #0
	/* w4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
END_FUNC elx_irq
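/*
 * Unlike FIQs, which are handled entirely in the secure world, an IRQ
 * suspends the current thread and exits to the normal world with
 * OPTEE_SMC_RETURN_RPC_IRQ so the interrupt can be serviced there; the
 * thread index supplied in w4 lets the normal world resume the suspended
 * thread afterwards.
 */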
/*
 * This struct is never used from C; it's only here to visualize the
 * layout.
 *
 * struct elx_fiq_rec {
 * 	uint64_t x[19 - 4];	x4..x18
 * 	uint64_t lr;
 * 	uint64_t sp_el0;
 * };
 */
#define ELX_FIQ_REC_X(x)	(8 * ((x) - 4))
#define ELX_FIQ_REC_LR		(8 + ELX_FIQ_REC_X(19))
#define ELX_FIQ_REC_SP_EL0	(8 + ELX_FIQ_REC_LR)
#define ELX_FIQ_REC_SIZE	(8 + ELX_FIQ_REC_SP_EL0)
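/*
 * Worked out, the offsets above are: x4 at 0, x5 at 8, ..., x18 at 112,
 * lr at 128 and sp_el0 at 136, for a total size of 144 bytes. The 8-byte
 * gap before lr keeps ELX_FIQ_REC_SIZE a multiple of 16, matching the
 * AArch64 requirement that the stack pointer stays 16-byte aligned.
 */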
LOCAL_FUNC elx_fiq , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_FIQ
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save registers on stack that can be corrupted by a call to
	 * a C function
	 */
	/* Make room for struct elx_fiq_rec */
	sub	sp, sp, #ELX_FIQ_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_FIQ_REC_X(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_FIQ_REC_LR]

	bl	thread_check_canaries
	adr	x16, thread_fiq_handler_ptr
	ldr	x16, [x16]
	blr	x16

	/* Restore x4..x18 */
	load_xregs sp, ELX_FIQ_REC_X(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_FIQ_REC_LR]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC elx_fiq