2 * Copyright (c) 2016, Linaro Limited
3 * Copyright (c) 2014, STMicroelectronics International N.V.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * 1. Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright notice,
13 * this list of conditions and the following disclaimer in the documentation
14 * and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
31 #include <arm32_macros.S>
32 #include <sm/optee_smc.h>
33 #include <sm/teesmc_opteed_macros.h>
34 #include <sm/teesmc_opteed.h>
35 #include <kernel/abort.h>
36 #include <kernel/thread_defs.h>
37 #include <kernel/unwind.h>
39 .section .text.thread_asm
/*
 * Monitor vector: entry for a standard (yielding) SMC from normal world.
 * Dispatches to the C handler; if the handler returns here (error path,
 * stack not switched) the result is reported back with CALL_DONE.
 * NOTE(review): interior lines are missing from this excerpt — the SMC
 * back to the monitor presumably sits just before the "b ." below.
 */
41 LOCAL_FUNC vector_std_smc_entry , :
46 bl thread_handle_std_smc
48 * Normally thread_handle_std_smc() should return via
49 * thread_exit(), thread_rpc(), but if thread_handle_std_smc()
50 * hasn't switched stack (error detected) it will do a normal "C"
54 ldr r0, =TEESMC_OPTEED_RETURN_CALL_DONE /* report call completed to monitor */
56 b . /* SMC should not return */
58 END_FUNC vector_std_smc_entry
/*
 * Monitor vector: entry for a fast (atomic, non-yielding) SMC.
 * Calls the C handler and reports CALL_DONE back to the monitor.
 * NOTE(review): interior lines are missing from this excerpt.
 */
60 LOCAL_FUNC vector_fast_smc_entry , :
65 bl thread_handle_fast_smc
67 ldr r0, =TEESMC_OPTEED_RETURN_CALL_DONE /* report call completed to monitor */
69 b . /* SMC should not return */
71 END_FUNC vector_fast_smc_entry
/*
 * Monitor vector: a FIQ (native/secure interrupt) was taken while in
 * normal world.  Checks stack canaries, then invokes the registered
 * native interrupt handler via thread_nintr_handler_ptr before
 * reporting FIQ_DONE.  NOTE(review): the indirect call through lr and
 * the SMC are in lines missing from this excerpt.
 */
73 LOCAL_FUNC vector_fiq_entry , :
76 /* Secure Monitor received a FIQ and passed control to us. */
77 bl thread_check_canaries
78 ldr lr, =thread_nintr_handler_ptr /* pointer to the native interrupt handler */
82 ldr r0, =TEESMC_OPTEED_RETURN_FIQ_DONE /* report FIQ handled to monitor */
84 b . /* SMC should not return */
86 END_FUNC vector_fiq_entry
/*
 * Monitor vector: PSCI CPU_ON — a secondary CPU is being powered on.
 * Invokes the registered handler via thread_cpu_on_handler_ptr, then
 * reports ON_DONE.  NOTE(review): the indirect call and the SMC are in
 * lines missing from this excerpt.
 */
88 LOCAL_FUNC vector_cpu_on_entry , :
91 ldr lr, =thread_cpu_on_handler_ptr /* registered CPU-on handler */
95 ldr r0, =TEESMC_OPTEED_RETURN_ON_DONE
97 b . /* SMC should not return */
99 END_FUNC vector_cpu_on_entry
/*
 * Monitor vector: PSCI CPU_OFF — this CPU is about to be powered off.
 * Invokes the registered handler via thread_cpu_off_handler_ptr, then
 * reports OFF_DONE.  NOTE(review): the indirect call and the SMC are in
 * lines missing from this excerpt.
 */
101 LOCAL_FUNC vector_cpu_off_entry , :
104 ldr lr, =thread_cpu_off_handler_ptr /* registered CPU-off handler */
108 ldr r0, =TEESMC_OPTEED_RETURN_OFF_DONE
110 b . /* SMC should not return */
112 END_FUNC vector_cpu_off_entry
/*
 * Monitor vector: PSCI CPU_SUSPEND — this CPU is entering a low-power
 * state.  Invokes the registered handler via
 * thread_cpu_suspend_handler_ptr, then reports SUSPEND_DONE.
 * NOTE(review): the indirect call and the SMC are in lines missing
 * from this excerpt.
 */
114 LOCAL_FUNC vector_cpu_suspend_entry , :
117 ldr lr, =thread_cpu_suspend_handler_ptr /* registered suspend handler */
121 ldr r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
123 b . /* SMC should not return */
125 END_FUNC vector_cpu_suspend_entry
/*
 * Monitor vector: PSCI resume — this CPU is waking from a low-power
 * state.  Invokes the registered handler via
 * thread_cpu_resume_handler_ptr, then reports RESUME_DONE.
 * NOTE(review): the indirect call and the SMC are in lines missing
 * from this excerpt.
 */
127 LOCAL_FUNC vector_cpu_resume_entry , :
130 ldr lr, =thread_cpu_resume_handler_ptr /* registered resume handler */
134 ldr r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
136 b . /* SMC should not return */
138 END_FUNC vector_cpu_resume_entry
/*
 * Monitor vector: PSCI SYSTEM_OFF — whole-system power-off requested.
 * Invokes the registered handler via thread_system_off_handler_ptr,
 * then reports SYSTEM_OFF_DONE.  NOTE(review): the indirect call and
 * the SMC are in lines missing from this excerpt.
 */
140 LOCAL_FUNC vector_system_off_entry , :
143 ldr lr, =thread_system_off_handler_ptr /* registered system-off handler */
147 ldr r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
149 b . /* SMC should not return */
151 END_FUNC vector_system_off_entry
/*
 * Monitor vector: PSCI SYSTEM_RESET — whole-system reset requested.
 * Invokes the registered handler via thread_system_reset_handler_ptr,
 * then reports SYSTEM_RESET_DONE.  NOTE(review): the indirect call and
 * the SMC are in lines missing from this excerpt.
 */
153 LOCAL_FUNC vector_system_reset_entry , :
156 ldr lr, =thread_system_reset_handler_ptr /* registered system-reset handler */
160 ldr r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
162 b . /* SMC should not return */
164 END_FUNC vector_system_reset_entry
167 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
168 * initialization. Also used when compiled with the internal monitor, but
169 * the cpu_*_entry and system_*_entry are not used then.
171 * Note that ARM-TF depends on the layout of this vector table, any change
172 * in layout has to be synced with ARM-TF.
/*
 * Vector table handed to the secure monitor / ARM Trusted Firmware.
 * The slot ORDER is an ABI shared with ARM-TF — do not reorder or
 * insert entries without syncing with ARM-TF (see the comment above).
 * Note the suspend/resume slots: resume deliberately precedes suspend.
 * NOTE(review): this excerpt is missing original line 183, presumably
 * the FIQ slot (b vector_fiq_entry) — confirm against the full file.
 */
174 FUNC thread_vector_table , :
177 b vector_std_smc_entry /* Yielding (standard) SMC */
178 b vector_fast_smc_entry /* Fast SMC */
179 b vector_cpu_on_entry /* PSCI CPU_ON */
180 b vector_cpu_off_entry /* PSCI CPU_OFF */
181 b vector_cpu_resume_entry /* Resume from low-power state */
182 b vector_cpu_suspend_entry /* Enter low-power state */
184 b vector_system_off_entry /* PSCI SYSTEM_OFF */
185 b vector_system_reset_entry /* PSCI SYSTEM_RESET */
187 END_FUNC thread_vector_table
/*
 * thread_set_abt_sp() — presumably installs the given stack pointer as
 * the Abort-mode banked SP (TODO confirm: the body, original lines
 * 190-197, is missing from this excerpt).
 */
189 FUNC thread_set_abt_sp , :
198 END_FUNC thread_set_abt_sp
/*
 * thread_set_irq_sp() — presumably installs the given stack pointer as
 * the IRQ-mode banked SP (TODO confirm: the body, original lines
 * 201-208, is missing from this excerpt).
 */
200 FUNC thread_set_irq_sp , :
209 END_FUNC thread_set_irq_sp
/*
 * thread_set_fiq_sp() — presumably installs the given stack pointer as
 * the FIQ-mode banked SP (TODO confirm: the body, original lines
 * 212-219, is missing from this excerpt).
 */
211 FUNC thread_set_fiq_sp , :
220 END_FUNC thread_set_fiq_sp
222 /* void thread_resume(struct thread_ctx_regs *regs) */
/*
 * Restores a thread context previously saved by thread_save_state()
 * and resumes execution at the saved PC with the saved CPSR.
 * r0 points at the thread_ctx_regs structure; registers r0-r12 are
 * reloaded from it last so r0 can be used for addressing first.
 * NOTE(review): several interior lines are missing from this excerpt.
 */
223 FUNC thread_resume , :
226 add r12, r0, #(13 * 4) /* Restore registers r0-r12 later */
232 ldm r12!, {r1, sp, lr} /* reload saved CPSR-to-be (r1), sp and lr */
241 /* Restore CPSR and jump to the instruction to resume at */
244 END_FUNC thread_resume
247 * Disables IRQ and FIQ and saves state of thread, returns original
/*
 * Saves the current thread state into its thread_ctx_regs (obtained
 * via thread_get_ctx_regs()) so thread_resume() can restore it later.
 * Disables Async abort/IRQ/FIQ while saving, restores the caller's
 * mode with those exceptions still masked, and returns the original
 * CPSR in r0.  NOTE(review): the pushes matching the pops below are in
 * lines missing from this excerpt.
 */
250 LOCAL_FUNC thread_save_state , :
255 * Uses stack for temporary storage, while storing needed
256 * context in the thread context struct.
261 cpsid aif /* Disable Async abort, IRQ and FIQ */
266 mov r5, r12 /* Save CPSR in a preserved register */
267 mrs r6, cpsr /* Save current CPSR */
269 bl thread_get_ctx_regs /* r0 = this thread's thread_ctx_regs */
271 pop {r1-r4} /* r0-r3 pushed above */
273 pop {r1-r4} /* r4-r7 pushed above */
285 stm r0!, {r1, sp, lr} /* store CPSR-to-resume, sp and lr into ctx */
287 orr r6, r6, #ARM32_CPSR_FIA /* Disable Async abort, IRQ and FIQ */
288 msr cpsr, r6 /* Restore mode */
290 mov r0, r5 /* Return original CPSR */
293 END_FUNC thread_save_state
/*
 * Entry point of a standard-SMC thread running on its own thread
 * stack.  Forwards r0-r7 as a struct thread_smc_args to the C handler,
 * then returns the r0-r3 results to the monitor with CALL_DONE.
 * NOTE(review): the stack switch and SMC are in lines missing from
 * this excerpt.
 */
295 FUNC thread_std_smc_entry , :
298 /* Pass r0-r7 in a struct thread_smc_args */
301 bl __thread_std_smc_entry
303 * Load the returned r0-r3 into preserved registers and skip the
304 * "returned" r4-r7 since they will not be returned to normal
310 /* Disable interrupts before switching to temporary stack */
317 ldr r0, =TEESMC_OPTEED_RETURN_CALL_DONE /* report call completed to monitor */
323 b . /* SMC should not return */
325 END_FUNC thread_std_smc_entry
328 /* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
/*
 * Performs an RPC to normal world: suspends the current thread (saving
 * a resume point at .thread_rpc_return), switches to the temporary
 * stack and issues CALL_DONE with the rv[] values as arguments.  When
 * normal world resumes the thread, execution continues at
 * .thread_rpc_return where r0-r5 from normal world are copied back
 * into rv[].  NOTE(review): the FUNC/END_FUNC lines and several
 * instructions (including the SMC and the .thread_rpc_return label)
 * are missing from this excerpt.
 */
331 * r0-r2 are used to pass parameters to normal world
332 * r0-r5 are used to pass return value back from normal world
334 * note that r3 is used to pass "resume information", that is, which
335 * thread it is that should resume.
337 * Since this function follows AAPCS we need to preserve r4-r5
338 * which are otherwise modified when returning back from normal world.
342 UNWIND( .save {r4-r5, lr})
347 mov r4, r0 /* Save original CPSR */
350 * Switch to temporary stack and SVC mode. Save CPSR to resume into.
353 ldr r5, [sp] /* Get pointer to rv[] */
354 cps #CPSR_MODE_SVC /* Change to SVC mode */
355 mov sp, r0 /* Switch to tmp stack */
357 mov r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
358 mov r1, r4 /* CPSR to restore */
359 ldr r2, =.thread_rpc_return /* resume point after RPC completes */
360 bl thread_state_suspend /* returns this thread's index */
361 mov r4, r0 /* Supply thread index */
362 ldr r0, =TEESMC_OPTEED_RETURN_CALL_DONE
363 ldm r5, {r1-r3} /* Load rv[] into r1-r3 */
365 b . /* SMC should not return */
369 * At this point the stack pointer has been restored to the value
370 * it had when thread_save_state() was called above.
372 * Jumps here from thread_resume above when RPC has returned. The
373 * IRQ and FIQ bits are restored to what they were when this
374 * function was originally entered.
376 pop {r12} /* Get pointer to rv[] */
377 stm r12, {r0-r5} /* Store r0-r5 into rv[] */
/*
 * FIQ (native/secure interrupt) exception handler used from the
 * thread_vect_table vector table below.  Saves the caller-visible and
 * banked-FIQ registers, checks stack canaries and dispatches to the
 * registered native interrupt handler.  NOTE(review): the lr
 * adjustment, the indirect call and the exception return are in lines
 * missing from this excerpt.
 */
382 LOCAL_FUNC thread_fiq_handler , :
385 /* FIQ has a +4 offset for lr compared to preferred return address */
388 * We're saving {r0-r3} and the banked fiq registers {r8-r12}. The
389 * banked fiq registers need to be saved because the secure monitor
390 * doesn't save those. The treatment of the banked fiq registers is
391 * somewhat analogous to the lazy save of VFP registers.
393 push {r0-r3, r8-r12, lr}
394 bl thread_check_canaries
395 ldr lr, =thread_nintr_handler_ptr /* pointer to the native interrupt handler */
398 pop {r0-r3, r8-r12, lr}
401 END_FUNC thread_fiq_handler
/*
 * IRQ (foreign/normal-world interrupt) exception handler.  Suspends
 * the current thread (thread_state_suspend) and hands the interrupt to
 * normal world via an RPC-style CALL_DONE/RPC_FOREIGN_INTR return to
 * the monitor.  NOTE(review): several interior lines (register saves,
 * mode switch, SMC) are missing from this excerpt.
 */
403 LOCAL_FUNC thread_irq_handler , :
407 * IRQ mode is set up to use tmp stack so FIQ has to be
408 * disabled before touching the stack. We can also assign
409 * SVC sp from IRQ sp to get SVC mode into the state we
410 * need when doing the SMC below.
412 cpsid f /* Disable FIQ also */
419 mov r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
423 blx thread_state_suspend /* returns this thread's index */
424 mov r4, r0 /* Supply thread index */
427 * Switch to SVC mode and copy current stack pointer as it already
434 ldr r0, =TEESMC_OPTEED_RETURN_CALL_DONE
435 ldr r1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR /* ask normal world to service the IRQ */
438 /* r4 is already filled in above */
440 b . /* SMC should not return */
442 END_FUNC thread_irq_handler
/*
 * Installs thread_vect_table as this CPU's exception vector base
 * (VBAR).  NOTE(review): the VBAR write itself — presumably a
 * write_vbar macro from arm32_macros.S — is in lines missing from
 * this excerpt.
 */
444 FUNC thread_init_vbar , :
446 /* Set vector (VBAR) */
447 ldr r0, =thread_vect_table
451 END_FUNC thread_init_vbar
454 * Below are low level routines handling entry and return from user mode.
456 * thread_enter_user_mode() saves all that registers user mode can change
457 * so kernel mode can restore needed registers when resuming execution
458 * after the call to thread_enter_user_mode() has returned.
459 * thread_enter_user_mode() doesn't return directly since it enters user
460 * mode instead, it's thread_unwind_user_mode() that does the
461 * returning by restoring the registers saved by thread_enter_user_mode().
463 * There are three ways for thread_enter_user_mode() to return to its caller:
464 * user TA calls utee_return, user TA calls utee_panic or through an abort.
466 * Calls to utee_return or utee_panic are handled as:
467 * thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call() which
468 * calls syscall_return() or syscall_panic().
470 * These function calls return normally except thread_svc_handler(),
471 * which is an exception handling routine so it reads return address and
472 * SPSR to restore from the stack. syscall_return() and syscall_panic()
473 * changes return address and SPSR used by thread_svc_handler() to instead of
474 * returning into user mode as with other syscalls it returns into
475 * thread_unwind_user_mode() in kernel mode instead. When
476 * thread_svc_handler() returns the stack pointer at the point where
477 * thread_enter_user_mode() left it so this is where
478 * thread_unwind_user_mode() can operate.
480 * Aborts are handled in a similar way but by thread_abort_handler()
481 * instead, when the pager sees that it's an abort from user mode that
482 * can't be handled it updates SPSR and return address used by
483 * thread_abort_handler() to return into thread_unwind_user_mode()
488 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
489 * unsigned long a2, unsigned long a3, unsigned long user_sp,
490 * unsigned long user_func, unsigned long spsr,
491 * uint32_t *exit_status0, uint32_t *exit_status1)
/*
 * Enters user mode to run user_func with arguments a0-a3 on the given
 * user stack, with the given SPSR.  Does not return directly — return
 * is performed by thread_unwind_user_mode() (see the large comment
 * above).  Stack args (beyond r0-r3 per AAPCS): user sp, user
 * function, spsr, exit_status0/1 pointers.  NOTE(review): the initial
 * register save (push), SPSR setup and the movs into user mode are in
 * lines missing from this excerpt.
 */
494 FUNC __thread_enter_user_mode , :
498 * Save all registers to allow syscall_return() to resume execution
499 * as if this function would have returned. This is also used in
502 * If stack usage of this function is changed
503 * thread_unwind_user_mode() has to be updated.
507 ldr r4, [sp, #(10 * 0x4)] /* user stack pointer */
508 ldr r5, [sp, #(11 * 0x4)] /* user function */
509 ldr r6, [sp, #(12 * 0x4)] /* spsr */
512 * Set the saved Processor Status Register to user mode to allow
513 * entry of user mode through movs below.
518 * Save old user sp and set new user sp.
527 * Don't allow return from this function, return is done through
528 * thread_unwind_user_mode() below.
531 /* Call the user function with its arguments */
534 END_FUNC __thread_enter_user_mode
537 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
538 * uint32_t exit_status1);
539 * See description in thread.h
/*
 * Counterpart of __thread_enter_user_mode(): runs in kernel mode after
 * a syscall_return()/syscall_panic()/abort redirected execution here.
 * Stores the exit statuses through the pointers saved on the stack by
 * __thread_enter_user_mode(), restores the old user sp and pops the
 * registers that function pushed, returning to its caller.  The stack
 * offsets below must stay in sync with __thread_enter_user_mode().
 * NOTE(review): the stores through ip are in lines missing from this
 * excerpt.
 */
541 FUNC thread_unwind_user_mode , :
544 ldr ip, [sp, #(15 * 0x4)] /* &ctx->panicked */
546 ldr ip, [sp, #(16 * 0x4)] /* &ctx->panic_code */
549 /* Restore old user sp */
555 pop {r4-r12,pc} /* Match the push in thread_enter_user_mode()*/
557 END_FUNC thread_unwind_user_mode
/*
 * Shared exception handler for undefined-instruction, data-abort and
 * prefetch-abort exceptions (the vector table below branches to the
 * thread_und/dabort/pabort_handler labels).  Each entry sets r0 to the
 * matching ABORT_TYPE_* and joins .thread_abort_generic, which
 * presumably builds a struct and calls the C abort handler — TODO
 * confirm: its body (original lines 597-614) is missing from this
 * excerpt, as is the thread_und_handler label referenced by the vector
 * table.
 */
559 LOCAL_FUNC thread_abort_handler , :
560 thread_abort_handler:
565 * Switch to abort mode to use that stack instead.
576 msr spsr_fsxc, r0 /* In case some code reads spsr directly */
577 mov r0, #ABORT_TYPE_UNDEF /* classify: undefined instruction */
578 b .thread_abort_generic
580 thread_dabort_handler:
585 mov r0, #ABORT_TYPE_DATA /* classify: data abort */
586 b .thread_abort_generic
588 thread_pabort_handler:
593 mov r0, #ABORT_TYPE_PREFETCH /* classify: prefetch abort */
594 b .thread_abort_generic
596 .thread_abort_generic:
615 END_FUNC thread_abort_handler
/*
 * SVC (system call) exception handler, reached from the vector table
 * below.  Per the large comment above, it reads the return address and
 * SPSR from the stack so syscall_return()/syscall_panic() can redirect
 * the return into thread_unwind_user_mode().  TODO confirm: the body
 * (original lines 618-629) is missing from this excerpt.
 */
617 LOCAL_FUNC thread_svc_handler , :
630 END_FUNC thread_svc_handler
/*
 * ARMv7-A exception vector table installed into VBAR by
 * thread_init_vbar().  Slot order is fixed by the architecture:
 * reset, undef, svc, pabort, dabort, (reserved), irq, fiq.
 * NOTE(review): the reset/reserved slots (original lines ~636/641)
 * are missing from this excerpt.
 */
633 LOCAL_FUNC thread_vect_table , :
637 b thread_und_handler /* Undefined instruction */
638 b thread_svc_handler /* System call */
639 b thread_pabort_handler /* Prefetch abort */
640 b thread_dabort_handler /* Data abort */
642 b thread_irq_handler /* IRQ */
643 b thread_fiq_handler /* FIQ */
645 END_FUNC thread_vect_table