2 * Copyright (c) 2014, STMicroelectronics International N.V.
3 * Copyright (c) 2016-2017, Linaro Limited
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * 1. Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright notice,
13 * this list of conditions and the following disclaimer in the documentation
14 * and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
29 #ifndef KERNEL_THREAD_H
30 #define KERNEL_THREAD_H
34 #include <types_ext.h>
36 #include <optee_msg.h>
37 #include <kernel/mutex.h>
38 #include <kernel/vfp.h>
39 #include <mm/pgt_cache.h>
43 #define THREAD_ID_INVALID -1
45 #define THREAD_RPC_MAX_NUM_PARAMS 4
48 struct thread_vector_table {
49 uint32_t std_smc_entry;
50 uint32_t fast_smc_entry;
51 uint32_t cpu_on_entry;
52 uint32_t cpu_off_entry;
53 uint32_t cpu_resume_entry;
54 uint32_t cpu_suspend_entry;
56 uint32_t system_off_entry;
57 uint32_t system_reset_entry;
59 extern struct thread_vector_table thread_vector_table;
61 struct thread_specific_data {
62 TAILQ_HEAD(, tee_ta_session) sess_stack;
63 struct tee_ta_ctx *ctx;
64 #ifdef CFG_SMALL_PAGE_USER_TA
65 struct pgt_cache pgt_cache;
68 paddr_t rpc_fs_payload_pa;
69 uint64_t rpc_fs_payload_cookie;
70 size_t rpc_fs_payload_size;
73 struct thread_user_vfp_state {
80 struct thread_smc_args {
81 uint32_t a0; /* SMC function ID */
82 uint32_t a1; /* Parameter */
83 uint32_t a2; /* Parameter */
84 uint32_t a3; /* Thread ID when returning from RPC */
85 uint32_t a4; /* Not used */
86 uint32_t a5; /* Not used */
87 uint32_t a6; /* Not used */
88 uint32_t a7; /* Hypervisor Client ID */
92 struct thread_smc_args {
93 uint64_t a0; /* SMC function ID */
94 uint64_t a1; /* Parameter */
95 uint64_t a2; /* Parameter */
96 uint64_t a3; /* Thread ID when returning from RPC */
97 uint64_t a4; /* Not used */
98 uint64_t a5; /* Not used */
99 uint64_t a6; /* Not used */
100 uint64_t a7; /* Hypervisor Client ID */
105 struct thread_abort_regs {
127 struct thread_abort_regs {
128 uint64_t x0; /* r0_usr */
129 uint64_t x1; /* r1_usr */
130 uint64_t x2; /* r2_usr */
131 uint64_t x3; /* r3_usr */
132 uint64_t x4; /* r4_usr */
133 uint64_t x5; /* r5_usr */
134 uint64_t x6; /* r6_usr */
135 uint64_t x7; /* r7_usr */
136 uint64_t x8; /* r8_usr */
137 uint64_t x9; /* r9_usr */
138 uint64_t x10; /* r10_usr */
139 uint64_t x11; /* r11_usr */
140 uint64_t x12; /* r12_usr */
141 uint64_t x13; /* r13/sp_usr */
142 uint64_t x14; /* r14/lr_usr */
166 struct thread_svc_regs {
180 struct thread_svc_regs {
183 uint64_t x0; /* r0_usr */
184 uint64_t x1; /* r1_usr */
185 uint64_t x2; /* r2_usr */
186 uint64_t x3; /* r3_usr */
187 uint64_t x4; /* r4_usr */
188 uint64_t x5; /* r5_usr */
189 uint64_t x6; /* r6_usr */
190 uint64_t x7; /* r7_usr */
191 uint64_t x8; /* r8_usr */
192 uint64_t x9; /* r9_usr */
193 uint64_t x10; /* r10_usr */
194 uint64_t x11; /* r11_usr */
195 uint64_t x12; /* r12_usr */
196 uint64_t x13; /* r13/sp_usr */
197 uint64_t x14; /* r14/lr_usr */
206 typedef void (*thread_smc_handler_t)(struct thread_smc_args *args);
207 typedef void (*thread_nintr_handler_t)(void);
208 typedef unsigned long (*thread_pm_handler_t)(unsigned long a0,
210 struct thread_handlers {
212 * stdcall and fastcall are called as regular functions and
213 * normal ARM Calling Convention applies. Return values are passed in
214 * args->param{1-3} and forwarded into r0-r3 when returned to
217 * stdcall handles calls which can be preempted from non-secure
218 * world. This handler is executed with a large stack.
220 * fastcall handles fast calls which can't be preempted. This
221 * handler is executed with a limited stack. This handler must not
222 * cause any aborts or re-enable native interrupts which are
223 * temporarily masked while executing this handler.
225 * TODO investigate if we should execute fastcalls and native interrupts
226 * on different stacks allowing native interrupts to be enabled during
229 thread_smc_handler_t std_smc;
230 thread_smc_handler_t fast_smc;
233 * fiq is called as a regular function and normal ARM Calling
234 * Convention applies.
236 * This handler handles native interrupts which can't be preempted. This
237 * handler is executed with a limited stack. This handler must not cause
238 * any aborts or re-enable native interrupts which are temporarily
239 * masked while executing this handler.
241 thread_nintr_handler_t nintr;
244 * Power management handlers triggered from ARM Trusted Firmware.
245 * Not used when using internal monitor.
247 thread_pm_handler_t cpu_on;
248 thread_pm_handler_t cpu_off;
249 thread_pm_handler_t cpu_suspend;
250 thread_pm_handler_t cpu_resume;
251 thread_pm_handler_t system_off;
252 thread_pm_handler_t system_reset;
254 void thread_init_primary(const struct thread_handlers *handlers);
255 void thread_init_per_cpu(void);
258 * Sets the stacks to be used by the different threads. Use THREAD_ID_0 for
259 * first stack, THREAD_ID_0 + 1 for the next and so on.
261 * Returns true on success and false on errors.
263 bool thread_init_stack(uint32_t stack_id, vaddr_t sp);
266 * Initializes a thread to be used during boot
268 void thread_init_boot_thread(void);
271 * Clears the current thread id
272 * Only supposed to be used during initialization.
274 void thread_clr_boot_thread(void);
277 * Returns current thread id.
279 int thread_get_id(void);
282 * Returns current thread id, or -1 on failure.
284 int thread_get_id_may_fail(void);
286 /* Returns Thread Specific Data (TSD) pointer. */
287 struct thread_specific_data *thread_get_tsd(void);
290 * Sets foreign interrupts status for current thread, must only be called
291 * from an active thread context.
293 * enable == true -> enable foreign interrupts
294 * enable == false -> disable foreign interrupts
296 void thread_set_foreign_intr(bool enable);
299 * Restores the foreign interrupts status (in CPSR) for current thread, must
300 * only be called from an active thread context.
302 void thread_restore_foreign_intr(void);
305 * Defines the bits for the exception mask used by the
306 * thread_*_exceptions() functions below.
307 * These definitions are compatible with both ARM32 and ARM64.
309 #define THREAD_EXCP_FOREIGN_INTR (ARM32_CPSR_I >> ARM32_CPSR_F_SHIFT)
310 #define THREAD_EXCP_NATIVE_INTR (ARM32_CPSR_F >> ARM32_CPSR_F_SHIFT)
311 #define THREAD_EXCP_ALL (THREAD_EXCP_FOREIGN_INTR \
312 | THREAD_EXCP_NATIVE_INTR \
313 | (ARM32_CPSR_A >> ARM32_CPSR_F_SHIFT))
316 * thread_get_exceptions() - return current exception mask
318 uint32_t thread_get_exceptions(void);
321 * thread_set_exceptions() - set exception mask
322 * @exceptions: exception mask to set
324 * Any previous exception mask is replaced by this exception mask, that is,
325 * old bits are cleared and replaced by these.
327 void thread_set_exceptions(uint32_t exceptions);
330 * thread_mask_exceptions() - Masks (disables) specified asynchronous exceptions
331 * @exceptions exceptions to mask
332 * @returns old exception state
334 uint32_t thread_mask_exceptions(uint32_t exceptions);
337 * thread_unmask_exceptions() - Unmasks asynchronous exceptions
338 * @state Old asynchronous exception state to restore (returned by
339 * thread_mask_exceptions())
341 void thread_unmask_exceptions(uint32_t state);
344 static inline bool thread_foreign_intr_disabled(void)
346 return !!(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
351 * thread_kernel_enable_vfp() - Temporarily enables usage of VFP
353 * Foreign interrupts are masked while VFP is enabled. User space must not be
354 * entered before thread_kernel_disable_vfp() has been called to disable VFP
355 * and restore the foreign interrupt status.
357 * This function may only be called from an active thread context and may
358 * not be called again before thread_kernel_disable_vfp() has been called.
360 * VFP state is saved as needed.
362 * Returns a state variable that should be passed to
363 * thread_kernel_disable_vfp().
365 uint32_t thread_kernel_enable_vfp(void);
368 * thread_kernel_disable_vfp() - Disables usage of VFP
369 * @state: state variable returned by thread_kernel_enable_vfp()
371 * Disables usage of VFP and restores foreign interrupt status after a call to
372 * thread_kernel_enable_vfp().
374 * This function may only be called after a call to
375 * thread_kernel_enable_vfp().
377 void thread_kernel_disable_vfp(uint32_t state);
380 * thread_kernel_save_vfp() - Saves kernel vfp state if enabled
382 void thread_kernel_save_vfp(void);
385 * thread_kernel_restore_vfp() - Restores kernel vfp state
387 void thread_kernel_restore_vfp(void);
390 * thread_user_enable_vfp() - Enables vfp for user mode usage
391 * @uvfp: pointer to where to save the vfp state if needed
393 void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp);
394 #else /*CFG_WITH_VFP*/
395 static inline void thread_kernel_save_vfp(void)
399 static inline void thread_kernel_restore_vfp(void)
402 #endif /*CFG_WITH_VFP*/
405 * thread_user_save_vfp() - Saves the user vfp state if enabled
408 void thread_user_save_vfp(void);
410 static inline void thread_user_save_vfp(void)
416 * thread_user_clear_vfp() - Clears the vfp state
417 * @uvfp: pointer to saved state to clear
420 void thread_user_clear_vfp(struct thread_user_vfp_state *uvfp);
422 static inline void thread_user_clear_vfp(
423 struct thread_user_vfp_state *uvfp __unused)
430 * thread_enter_user_mode() - Enters user mode
431 * @a0: Passed in r/x0 for user_func
432 * @a1: Passed in r/x1 for user_func
433 * @a2: Passed in r/x2 for user_func
434 * @a3: Passed in r/x3 for user_func
435 * @user_sp: Assigned sp value in user mode
436 * @user_func: Function to execute in user mode
437 * @is_32bit: True if TA should execute in Aarch32, false if Aarch64
438 * @exit_status0: Pointer to opaque exit status 0
439 * @exit_status1: Pointer to opaque exit status 1
441 * This function enters user mode with the arguments described above,
442 * @exit_status0 and @exit_status1 are filled in by thread_unwind_user_mode()
443 * when returning back to the caller of this function through an exception
446 * @Returns what's passed in "ret" to thread_unwind_user_mode()
448 uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
449 unsigned long a2, unsigned long a3, unsigned long user_sp,
450 unsigned long entry_func, bool is_32bit,
451 uint32_t *exit_status0, uint32_t *exit_status1);
454 * thread_unwind_user_mode() - Unwinds kernel stack from user entry
455 * @ret: Value to return from thread_enter_user_mode()
456 * @exit_status0: Exit status 0
457 * @exit_status1: Exit status 1
459 * This is the function that exception handlers can return into
460 * to resume execution in kernel mode instead of user mode.
462 * This function is closely coupled with thread_enter_user_mode() since it
463 * needs to restore registers saved by thread_enter_user_mode() and when it
464 * returns make it look like thread_enter_user_mode() just returned. It is
465 * expected that the stack pointer is where thread_enter_user_mode() left
466 * it. The stack will be unwound and the function will return to where
467 * thread_enter_user_mode() was called from. Exit_status0 and exit_status1
468 * are filled in the corresponding pointers supplied to
469 * thread_enter_user_mode().
471 void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
472 uint32_t exit_status1);
476 * thread_get_saved_thread_sp() - Returns the saved sp of current thread
478 * When switching from the thread stack pointer the value is stored
479 * separately in the current thread context. This function returns this
482 * @returns stack pointer
484 vaddr_t thread_get_saved_thread_sp(void);
487 bool thread_addr_is_in_stack(vaddr_t va);
490 * Adds a mutex to the list of held mutexes for current thread
491 * Requires foreign interrupts to be disabled.
493 void thread_add_mutex(struct mutex *m);
496 * Removes a mutex from the list of held mutexes for current thread
497 * Requires foreign interrupts to be disabled.
499 void thread_rem_mutex(struct mutex *m);
502 * Disables and empties the prealloc RPC cache one reference at a time. If
503 * all threads are idle this function returns true and a cookie of one shm
504 * object which was removed from the cache. When the cache is empty *cookie
505 * is set to 0 and the cache is disabled else a valid cookie value. If one
506 * thread isn't idle this function returns false.
508 bool thread_disable_prealloc_rpc_cache(uint64_t *cookie);
511 * Enables the prealloc RPC cache. If all threads are idle the cache is
512 * enabled and this function returns true. If one thread isn't idle this
513 * function returns false.
515 bool thread_enable_prealloc_rpc_cache(void);
518 * Allocates data for struct optee_msg_arg.
520 * @size: size in bytes of struct optee_msg_arg
521 * @arg: returned physical pointer to a struct optee_msg_arg buffer,
522 * 0 if allocation failed.
523 * @cookie: returned cookie used when freeing the buffer
525 void thread_rpc_alloc_arg(size_t size, paddr_t *arg, uint64_t *cookie);
528 * Free physical memory previously allocated with thread_rpc_alloc_arg()
530 * @cookie: cookie received when allocating the buffer
532 void thread_rpc_free_arg(uint64_t cookie);
535 * Allocates data for payload buffers.
537 * @size: size in bytes of payload buffer
538 * @payload: returned physical pointer to payload buffer, 0 if allocation
540 * @cookie: returned cookie used when freeing the buffer
542 void thread_rpc_alloc_payload(size_t size, paddr_t *payload, uint64_t *cookie);
545 * Free physical memory previously allocated with thread_rpc_alloc_payload()
547 * @cookie: cookie received when allocating the buffer
549 void thread_rpc_free_payload(uint64_t cookie);
552 * Does an RPC using a preallocated argument buffer
554 * @num_params: number of parameters (max 2)
555 * @params: RPC parameters
556 * @returns RPC return value
558 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
559 struct optee_msg_param *params);
563 #endif /*KERNEL_THREAD_H*/