2 * Copyright (c) 2014, STMicroelectronics International N.V.
3 * Copyright (c) 2016-2017, Linaro Limited
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * 1. Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright notice,
13 * this list of conditions and the following disclaimer in the documentation
14 * and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
29 #ifndef KERNEL_THREAD_H
30 #define KERNEL_THREAD_H
33 #include <types_ext.h>
35 #include <optee_msg.h>
36 #include <kernel/mutex.h>
37 #include <kernel/vfp.h>
38 #include <mm/pgt_cache.h>
42 #define THREAD_ID_INVALID -1
44 #define THREAD_RPC_MAX_NUM_PARAMS 4
/*
 * Vector table with entry point addresses handed over to normal world
 * for SMC dispatch and power management events (see the "Power
 * management handlers triggered from ARM Trusted Firmware" note on
 * struct thread_handlers further down).
 * NOTE(review): a field between cpu_suspend_entry and system_off_entry
 * is not visible in this extract — confirm against the full header.
 */
47 struct thread_vector_table {
48 uint32_t std_smc_entry; /* entry for yielding ("std") SMC calls */
49 uint32_t fast_smc_entry; /* entry for fast (non-preemptible) SMC calls */
50 uint32_t cpu_on_entry; /* power management: CPU coming online */
51 uint32_t cpu_off_entry; /* power management: CPU going offline */
52 uint32_t cpu_resume_entry; /* power management: resume after suspend */
53 uint32_t cpu_suspend_entry; /* power management: CPU suspend */
55 uint32_t system_off_entry; /* power management: system power-off */
56 uint32_t system_reset_entry; /* power management: system reset */
58 extern struct thread_vector_table thread_vector_table;
/*
 * Per-thread data, returned by thread_get_tsd() below.
 * NOTE(review): the #endif matching CFG_SMALL_PAGE_USER_TA and at least
 * one field before rpc_fs_payload_pa are not visible in this extract.
 */
60 struct thread_specific_data {
61 TAILQ_HEAD(, tee_ta_session) sess_stack; /* stack of TA sessions entered by this thread */
62 struct tee_ta_ctx *ctx; /* presumably the currently active TA context — confirm */
63 #ifdef CFG_SMALL_PAGE_USER_TA
64 struct pgt_cache pgt_cache; /* per-thread page-table cache for user TAs */
67 paddr_t rpc_fs_payload_pa; /* physical address of preallocated RPC payload buffer */
68 uint64_t rpc_fs_payload_cookie; /* cookie identifying that buffer to normal world */
69 size_t rpc_fs_payload_size; /* size in bytes of the preallocated buffer */
72 struct thread_user_vfp_state {
/*
 * SMC argument registers on entry/exit, 32-bit layout. Appears to be the
 * AArch32 variant of this struct — the surrounding #ifdef is not visible
 * in this extract.
 */
79 struct thread_smc_args {
80 uint32_t a0; /* SMC function ID */
81 uint32_t a1; /* Parameter */
82 uint32_t a2; /* Parameter */
83 uint32_t a3; /* Thread ID when returning from RPC */
84 uint32_t a4; /* Not used */
85 uint32_t a5; /* Not used */
86 uint32_t a6; /* Not used */
87 uint32_t a7; /* Hypervisor Client ID */
/*
 * SMC argument registers on entry/exit, 64-bit layout. Appears to be the
 * AArch64 variant of this struct — the surrounding #else/#endif is not
 * visible in this extract.
 */
91 struct thread_smc_args {
92 uint64_t a0; /* SMC function ID */
93 uint64_t a1; /* Parameter */
94 uint64_t a2; /* Parameter */
95 uint64_t a3; /* Thread ID when returning from RPC */
96 uint64_t a4; /* Not used */
97 uint64_t a5; /* Not used */
98 uint64_t a6; /* Not used */
99 uint64_t a7; /* Hypervisor Client ID */
104 struct thread_abort_regs {
/*
 * Register state saved when taking an abort exception, 64-bit layout
 * (appears to be the AArch64 variant; the tail of the struct lies
 * outside this extract). The r*_usr comments give the AArch32 register
 * that each x-register aliases when a 32-bit TA was running.
 */
126 struct thread_abort_regs {
127 uint64_t x0; /* r0_usr */
128 uint64_t x1; /* r1_usr */
129 uint64_t x2; /* r2_usr */
130 uint64_t x3; /* r3_usr */
131 uint64_t x4; /* r4_usr */
132 uint64_t x5; /* r5_usr */
133 uint64_t x6; /* r6_usr */
134 uint64_t x7; /* r7_usr */
135 uint64_t x8; /* r8_usr */
136 uint64_t x9; /* r9_usr */
137 uint64_t x10; /* r10_usr */
138 uint64_t x11; /* r11_usr */
139 uint64_t x12; /* r12_usr */
140 uint64_t x13; /* r13/sp_usr */
141 uint64_t x14; /* r14/lr_usr */
165 struct thread_svc_regs {
/*
 * Register state saved when taking a system call (SVC) exception,
 * 64-bit layout (appears to be the AArch64 variant; fields before x0
 * and after x14 lie outside this extract). The r*_usr comments give the
 * AArch32 register each x-register aliases for a 32-bit TA.
 */
179 struct thread_svc_regs {
182 uint64_t x0; /* r0_usr */
183 uint64_t x1; /* r1_usr */
184 uint64_t x2; /* r2_usr */
185 uint64_t x3; /* r3_usr */
186 uint64_t x4; /* r4_usr */
187 uint64_t x5; /* r5_usr */
188 uint64_t x6; /* r6_usr */
189 uint64_t x7; /* r7_usr */
190 uint64_t x8; /* r8_usr */
191 uint64_t x9; /* r9_usr */
192 uint64_t x10; /* r10_usr */
193 uint64_t x11; /* r11_usr */
194 uint64_t x12; /* r12_usr */
195 uint64_t x13; /* r13/sp_usr */
196 uint64_t x14; /* r14/lr_usr */
205 typedef void (*thread_smc_handler_t)(struct thread_smc_args *args);
206 typedef void (*thread_fiq_handler_t)(void);
207 typedef unsigned long (*thread_pm_handler_t)(unsigned long a0,
/* Handlers registered by the caller of thread_init_primary() below. */
209 struct thread_handlers {
211 * stdcall and fastcall are called as regular functions and
212 * normal ARM Calling Convention applies. Return values are passed in
213 * args->param{1-3} and forwarded into r0-r3 when returned to
216 * stdcall handles calls which can be preempted from non-secure
217 * world. This handler is executed with a large stack.
219 * fastcall handles fast calls which can't be preempted. This
220 * handler is executed with a limited stack. This handler must not
221 * cause any aborts or re-enable FIQs which are temporarily masked
222 * while executing this handler.
224 * TODO investigate if we should execute fastcalls and FIQs on
225 * different stacks allowing FIQs to be enabled during a fastcall.
227 thread_smc_handler_t std_smc;
228 thread_smc_handler_t fast_smc;
231 * fiq is called as a regular function and normal ARM Calling
232 * Convention applies.
234 * This handler handles FIQs which can't be preempted. This handler
235 * is executed with a limited stack. This handler must not cause
236 * any aborts or re-enable FIQs which are temporarily masked while
237 * executing this handler.
239 thread_fiq_handler_t fiq;
242 * Power management handlers triggered from ARM Trusted Firmware.
243 * Not used when using internal monitor.
245 thread_pm_handler_t cpu_on;
246 thread_pm_handler_t cpu_off;
247 thread_pm_handler_t cpu_suspend;
248 thread_pm_handler_t cpu_resume;
249 thread_pm_handler_t system_off;
250 thread_pm_handler_t system_reset;
252 void thread_init_primary(const struct thread_handlers *handlers);
253 void thread_init_per_cpu(void);
256 * Sets the stacks to be used by the different threads. Use THREAD_ID_0 for
257 * first stack, THREAD_ID_0 + 1 for the next and so on.
259 * Returns true on success and false on errors.
261 bool thread_init_stack(uint32_t stack_id, vaddr_t sp);
264 * Initializes a thread to be used during boot
266 void thread_init_boot_thread(void);
269 * Clears the current thread id
270 * Only supposed to be used during initialization.
272 void thread_clr_boot_thread(void);
275 * Returns current thread id.
277 int thread_get_id(void);
280 * Returns current thread id, or -1 on failure.
282 int thread_get_id_may_fail(void);
284 /* Returns Thread Specific Data (TSD) pointer. */
285 struct thread_specific_data *thread_get_tsd(void);
288 * Sets IRQ status for current thread, must only be called from an
289 * active thread context.
291 * enable == true -> enable IRQ
292 * enable == false -> disable IRQ
294 void thread_set_irq(bool enable);
297 * Restores the IRQ status (in CPSR) for current thread, must only be called
298 * from an active thread context.
300 void thread_restore_irq(void);
303 * Defines the bits for the exception mask used by the
304 * thread_*_exceptions() functions below.
306 #define THREAD_EXCP_FIQ (1 << 0)
307 #define THREAD_EXCP_IRQ (1 << 1)
308 #define THREAD_EXCP_ABT (1 << 2)
309 #define THREAD_EXCP_ALL (THREAD_EXCP_FIQ | THREAD_EXCP_IRQ | THREAD_EXCP_ABT)
312 * thread_get_exceptions() - return current exception mask
314 uint32_t thread_get_exceptions(void);
317 * thread_set_exceptions() - set exception mask
318 * @exceptions: exception mask to set
320 * Any previous exception mask is replaced by this exception mask, that is,
321 * old bits are cleared and replaced by these.
323 void thread_set_exceptions(uint32_t exceptions);
326 * thread_mask_exceptions() - Masks (disables) specified asynchronous exceptions
327 * @exceptions exceptions to mask
328 * @returns old exception state
330 uint32_t thread_mask_exceptions(uint32_t exceptions);
333 * thread_unmask_exceptions() - Unmasks asynchronous exceptions
334 * @state Old asynchronous exception state to restore (returned by
335 * thread_mask_exceptions())
337 void thread_unmask_exceptions(uint32_t state);
340 static inline bool thread_irq_disabled(void)
342 return !!(thread_get_exceptions() & THREAD_EXCP_IRQ);
347 * thread_kernel_enable_vfp() - Temporarily enables usage of VFP
349 * IRQ is masked while VFP is enabled. User space must not be entered before
350 * thread_kernel_disable_vfp() has been called to disable VFP and restore the
353 * This function may only be called from an active thread context and may
354 * not be called again before thread_kernel_disable_vfp() has been called.
356 * VFP state is saved as needed.
358 * Returns a state variable that should be passed to
359 * thread_kernel_disable_vfp().
361 uint32_t thread_kernel_enable_vfp(void);
364 * thread_kernel_disable_vfp() - Disables usage of VFP
365 * @state: state variable returned by thread_kernel_enable_vfp()
367 * Disables usage of VFP and restores IRQ status after a call to
368 * thread_kernel_enable_vfp().
370 * This function may only be called after a call to
371 * thread_kernel_enable_vfp().
373 void thread_kernel_disable_vfp(uint32_t state);
376 * thread_kernel_save_vfp() - Saves kernel vfp state if enabled
378 void thread_kernel_save_vfp(void);
381 * thread_kernel_restore_vfp() - Restores kernel vfp state
383 void thread_kernel_restore_vfp(void);
386 * thread_user_enable_vfp() - Enables vfp for user mode usage
387 * @uvfp: pointer to where to save the vfp state if needed
389 void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp);
390 #else /*CFG_WITH_VFP*/
391 static inline void thread_kernel_save_vfp(void)
395 static inline void thread_kernel_restore_vfp(void)
398 #endif /*CFG_WITH_VFP*/
401 * thread_user_save_vfp() - Saves the user vfp state if enabled
404 void thread_user_save_vfp(void);
406 static inline void thread_user_save_vfp(void)
412 * thread_user_clear_vfp() - Clears the vfp state
413 * @uvfp: pointer to saved state to clear
416 void thread_user_clear_vfp(struct thread_user_vfp_state *uvfp);
418 static inline void thread_user_clear_vfp(
419 struct thread_user_vfp_state *uvfp __unused)
426 * thread_enter_user_mode() - Enters user mode
427 * @a0: Passed in r/x0 for user_func
428 * @a1: Passed in r/x1 for user_func
429 * @a2: Passed in r/x2 for user_func
430 * @a3: Passed in r/x3 for user_func
431 * @user_sp: Assigned sp value in user mode
432 * @user_func: Function to execute in user mode
433 * @is_32bit: True if TA should execute in Aarch32, false if Aarch64
434 * @exit_status0: Pointer to opaque exit status 0
435 * @exit_status1: Pointer to opaque exit status 1
437 * This function enters user mode with the arguments described above,
438 * @exit_status0 and @exit_status1 are filled in by thread_unwind_user_mode()
439 * when returning back to the caller of this function through an exception
442 * @Returns what's passed in "ret" to thread_unwind_user_mode()
444 uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
445 unsigned long a2, unsigned long a3, unsigned long user_sp,
446 unsigned long entry_func, bool is_32bit,
447 uint32_t *exit_status0, uint32_t *exit_status1);
450 * thread_unwind_user_mode() - Unwinds kernel stack from user entry
451 * @ret: Value to return from thread_enter_user_mode()
452 * @exit_status0: Exit status 0
453 * @exit_status1: Exit status 1
455 * This is the function that exception handlers can return into
456 * to resume execution in kernel mode instead of user mode.
458 * This function is closely coupled with thread_enter_user_mode() since it
459 * needs to restore registers saved by thread_enter_user_mode() and when it
460 * returns make it look like thread_enter_user_mode() just returned. It is
461 * expected that the stack pointer is where thread_enter_user_mode() left
462 * it. The stack will be unwound and the function will return to where
463 * thread_enter_user_mode() was called from. Exit_status0 and exit_status1
464 * are filled in the corresponding pointers supplied to
465 * thread_enter_user_mode().
467 void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
468 uint32_t exit_status1);
472 * thread_get_saved_thread_sp() - Returns the saved sp of current thread
474 * When switching from the thread stack pointer the value is stored
475 * separately in the current thread context. This function returns this
478 * @returns stack pointer
480 vaddr_t thread_get_saved_thread_sp(void);
483 bool thread_addr_is_in_stack(vaddr_t va);
486 * Adds a mutex to the list of held mutexes for current thread
487 * Requires IRQs to be disabled.
489 void thread_add_mutex(struct mutex *m);
492 * Removes a mutex from the list of held mutexes for current thread
493 * Requires IRQs to be disabled.
495 void thread_rem_mutex(struct mutex *m);
498 * Disables and empties the prealloc RPC cache one reference at a time. If
499 * all threads are idle this function returns true and a cookie of one shm
500 * object which was removed from the cache. When the cache is empty *cookie
501 * is set to 0 and the cache is disabled, otherwise *cookie holds a valid cookie value. If one
502 * thread isn't idle this function returns false.
504 bool thread_disable_prealloc_rpc_cache(uint64_t *cookie);
507 * Enables the prealloc RPC cache. If all threads are idle the cache is
508 * enabled and this function returns true. If one thread isn't idle this
509 * function returns false.
511 bool thread_enable_prealloc_rpc_cache(void);
514 * Allocates data for struct optee_msg_arg.
516 * @size: size in bytes of struct optee_msg_arg
517 * @arg: returned physical pointer to a struct optee_msg_arg buffer,
518 * 0 if allocation failed.
519 * @cookie: returned cookie used when freeing the buffer
521 void thread_rpc_alloc_arg(size_t size, paddr_t *arg, uint64_t *cookie);
524 * Free physical memory previously allocated with thread_rpc_alloc_arg()
526 * @cookie: cookie received when allocating the buffer
528 void thread_rpc_free_arg(uint64_t cookie);
531 * Allocates data for payload buffers.
533 * @size: size in bytes of payload buffer
534 * @payload: returned physical pointer to payload buffer, 0 if allocation
536 * @cookie: returned cookie used when freeing the buffer
538 void thread_rpc_alloc_payload(size_t size, paddr_t *payload, uint64_t *cookie);
541 * Free physical memory previously allocated with thread_rpc_alloc_payload()
543 * @cookie: cookie received when allocating the buffer
545 void thread_rpc_free_payload(uint64_t cookie);
548 * Does an RPC using a preallocated argument buffer
550 * @num_params: number of parameters (max 2)
551 * @params: RPC parameters
552 * @returns RPC return value
554 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
555 struct optee_msg_param *params);
559 #endif /*KERNEL_THREAD_H*/