2 * Copyright (c) 2014, STMicroelectronics International N.V.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
28 #include <types_ext.h>
35 #include <kernel/mutex.h>
36 #include <kernel/panic.h>
37 #include <kernel/pseudo_ta.h>
38 #include <kernel/tee_common.h>
39 #include <kernel/tee_misc.h>
40 #include <kernel/tee_ta_manager.h>
41 #include <kernel/tee_time.h>
42 #include <kernel/thread.h>
43 #include <kernel/user_ta.h>
44 #include <mm/core_mmu.h>
45 #include <mm/core_memprot.h>
47 #include <mm/tee_mmu.h>
48 #include <tee/tee_svc_cryp.h>
49 #include <tee/tee_obj.h>
50 #include <tee/tee_svc_storage.h>
51 #include <tee_api_types.h>
53 #include <utee_types.h>
56 /* This mutex protects the critical section in tee_ta_init_session */
57 struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
/* Signalled when the single-instance lock (below) is released */
58 static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
/* Thread currently holding the single-instance lock, or THREAD_ID_INVALID */
59 static int tee_ta_single_instance_thread = THREAD_ID_INVALID;
/* Per-thread recursion depth of the single-instance lock */
60 static size_t tee_ta_single_instance_count;
/* Global list of loaded TA contexts */
61 struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);
/*
 * Acquire the global single-instance lock for the calling thread.
 * Recursive per thread: if the caller already owns it, only the nesting
 * count is bumped. Caller must hold tee_ta_mutex (condvar_wait releases
 * and re-acquires it while blocking).
 */
63 static void lock_single_instance(void)
65 /* Requires tee_ta_mutex to be held */
66 if (tee_ta_single_instance_thread != thread_get_id()) {
67 /* Wait until the single-instance lock is available. */
68 while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
69 condvar_wait(&tee_ta_cv, &tee_ta_mutex);
/* Lock was free: claim it; nesting count must start at zero */
71 tee_ta_single_instance_thread = thread_get_id();
72 assert(tee_ta_single_instance_count == 0);
75 tee_ta_single_instance_count++;
/*
 * Release one nesting level of the single-instance lock; when the count
 * drops to zero the lock is freed and one waiter is woken via tee_ta_cv.
 * Caller must hold tee_ta_mutex and own the lock.
 */
78 static void unlock_single_instance(void)
80 /* Requires tee_ta_mutex to be held */
81 assert(tee_ta_single_instance_thread == thread_get_id());
82 assert(tee_ta_single_instance_count > 0);
84 tee_ta_single_instance_count--;
85 if (tee_ta_single_instance_count == 0) {
86 tee_ta_single_instance_thread = THREAD_ID_INVALID;
87 condvar_signal(&tee_ta_cv);
/* Return true if the calling thread owns the single-instance lock. */
91 static bool has_single_instance_lock(void)
93 /* Requires tee_ta_mutex to be held */
94 return tee_ta_single_instance_thread == thread_get_id();
/*
 * Try to mark @ctx busy for exclusive use by the calling thread.
 * For single-instance TAs the global single-instance lock is taken first.
 * Returns false only when waiting for the TA would deadlock (we already
 * hold the single-instance lock while the TA is busy); otherwise waits
 * on ctx->busy_cv until the TA is free and returns true.
 * NOTE(review): the busy-flag test/set lines are not visible in this
 * extract — presumably a loop on ctx->busy surrounds the condvar_wait;
 * confirm against the full file.
 */
97 static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
101 mutex_lock(&tee_ta_mutex);
103 if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
104 lock_single_instance();
106 if (has_single_instance_lock()) {
109 * We're holding the single-instance lock and the
110 * TA is busy, as waiting now would only cause a
111 * dead-lock, we release the lock and return false.
114 if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
115 unlock_single_instance();
119 * We're not holding the single-instance lock, we're free to
120 * wait for the TA to become available.
123 condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
126 /* Either it's already true or we should set it to true */
129 mutex_unlock(&tee_ta_mutex);
/*
 * Mark @ctx busy, treating failure as fatal.
 * NOTE(review): the failure branch body is not visible in this extract —
 * upstream OP-TEE panics here; confirm against the full file.
 */
133 static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
135 if (!tee_ta_try_set_busy(ctx))
/*
 * Clear the busy state of @ctx set by tee_ta_(try_)set_busy(): wake one
 * waiter on ctx->busy_cv and drop the single-instance lock if this is a
 * single-instance TA.
 */
139 static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
141 mutex_lock(&tee_ta_mutex);
145 condvar_signal(&ctx->busy_cv);
147 if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
148 unlock_single_instance();
150 mutex_unlock(&tee_ta_mutex);
/*
 * Drop one reference on session @s; when the last external reference is
 * going away, wake a thread blocked in tee_ta_unlink_session() on
 * s->refc_cv. Requires tee_ta_mutex held.
 * NOTE(review): the line actually decrementing s->ref_count is not
 * visible in this extract — confirm its position relative to the
 * ref_count == 1 test against the full file.
 */
153 static void dec_session_ref_count(struct tee_ta_session *s)
155 assert(s->ref_count > 0);
157 if (s->ref_count == 1)
158 condvar_signal(&s->refc_cv);
/*
 * Release a session obtained with tee_ta_get_session(): if the calling
 * thread holds the per-session lock, release it and wake one waiter,
 * then drop the reference taken at get time.
 */
161 void tee_ta_put_session(struct tee_ta_session *s)
163 mutex_lock(&tee_ta_mutex);
165 if (s->lock_thread == thread_get_id()) {
166 s->lock_thread = THREAD_ID_INVALID;
167 condvar_signal(&s->lock_cv);
169 dec_session_ref_count(s);
171 mutex_unlock(&tee_ta_mutex);
/*
 * Look up a session in @open_sessions by id. The id is the session
 * pointer's virtual address, so the match compares (vaddr_t)s == id.
 * Requires tee_ta_mutex held. Returns the session or (presumably,
 * in the truncated tail) NULL when not found.
 */
174 static struct tee_ta_session *find_session(uint32_t id,
175 struct tee_ta_session_head *open_sessions)
177 struct tee_ta_session *s;
179 TAILQ_FOREACH(s, open_sessions, link) {
180 if ((vaddr_t)s == id)
/*
 * Find session @id in @open_sessions and take a reference on it.
 * With @exclusive set, also take the per-session lock: wait until no
 * other thread holds it (or the session is being unlinked), then record
 * the caller as lock owner. Re-locking from the same thread is a bug
 * (asserted). Balanced by tee_ta_put_session().
 * NOTE(review): the ref_count increment and the unlink/NULL-return paths
 * are in lines not visible in this extract.
 */
186 struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
187 struct tee_ta_session_head *open_sessions)
189 struct tee_ta_session *s;
191 mutex_lock(&tee_ta_mutex);
194 s = find_session(id, open_sessions);
/* A thread must not take the same session lock twice */
205 assert(s->lock_thread != thread_get_id());
/* Wait for the session lock unless the session is being torn down */
207 while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
208 condvar_wait(&s->lock_cv, &tee_ta_mutex);
/* Session is going away: drop the reference we took */
211 dec_session_ref_count(s);
216 s->lock_thread = thread_get_id();
220 mutex_unlock(&tee_ta_mutex);
/*
 * Remove @s from @open_sessions. Caller must hold the per-session lock
 * and one reference. Wakes all lock waiters (they will see the teardown
 * state), then waits until ours is the only remaining reference before
 * unlinking, so no other thread can still be using the session.
 */
224 static void tee_ta_unlink_session(struct tee_ta_session *s,
225 struct tee_ta_session_head *open_sessions)
227 mutex_lock(&tee_ta_mutex);
229 assert(s->ref_count >= 1);
230 assert(s->lock_thread == thread_get_id());
/* Wake every waiter so they can observe the session is being removed */
234 condvar_broadcast(&s->lock_cv);
/* Wait for all other references to be dropped */
236 while (s->ref_count != 1)
237 condvar_wait(&s->refc_cv, &tee_ta_mutex);
239 TAILQ_REMOVE(open_sessions, s, link);
241 mutex_unlock(&tee_ta_mutex);
245 * tee_ta_context_find - Find a loaded TA context in tee_ctxes by UUID
246 * Returns a pointer to the matching context (the not-found return is in
 * lines not visible in this extract; presumably NULL). Requires
 * tee_ta_mutex held since it walks the shared context list.
248 static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
250 struct tee_ta_ctx *ctx;
252 TAILQ_FOREACH(ctx, &tee_ctxes, link) {
253 if (memcmp(&ctx->uuid, uuid, sizeof(TEE_UUID)) == 0)
260 /* check if requester (client ID) matches session initial client */
/*
 * Access control for session reuse: the kernel identity may always act
 * on a session; a normal-world client may not touch a session opened by
 * a TA (hijack attempt); any other identity must match the identity the
 * session was opened with exactly.
 */
261 static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
263 if (id == KERN_IDENTITY)
266 if (id == NSAPP_IDENTITY) {
267 if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
268 DMSG("nsec tries to hijack TA session");
269 return TEE_ERROR_ACCESS_DENIED;
274 if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
275 DMSG("client id mismatch");
276 return TEE_ERROR_ACCESS_DENIED;
282 * Check if invocation parameters matches TA properties
284 * @s - current session handle
285 * @param - already identified memory references hold a valid 'mobj'.
288 * - All TAs can access 'non-secure' shared memory.
289 * - All TAs can access TEE private memory (seccpy)
290 * - Only SDP flagged TAs can accept SDP memory references.
292 #ifndef CFG_SECURE_DATA_PATH
/* Without SDP support, SDP refs never reach this point: always allow */
293 static bool check_params(struct tee_ta_session *sess __unused,
294 struct tee_ta_param *param __unused)
297 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
298 * are rejected at OP-TEE core entry. Hence here all TAs have same
299 * permissions regarding memory reference parameters.
304 static bool check_params(struct tee_ta_session *sess,
305 struct tee_ta_param *param)
310 * When CFG_SECURE_DATA_PATH is enabled, OP-TEE entry allows SHM and
311 * SDP memory references. Only TAs flagged SDP can access SDP memory.
/* SDP-flagged TAs may receive any memory reference */
313 if (sess->ctx->flags & TA_FLAG_SECURE_DATA_PATH)
/* Non-SDP TA: reject if any memref parameter points at SDP memory */
316 for (n = 0; n < TEE_NUM_PARAMS; n++) {
317 uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
318 struct param_mem *mem = &param->u[n].mem;
/* Only memory-reference parameter types carry an mobj to vet */
320 if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
321 param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
322 param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
326 if (mobj_is_sdp_mem(mem->mobj))
/*
 * Arm the cancellation deadline for @sess: current system time plus
 * @cancel_req_to milliseconds. TEE_TIMEOUT_INFINITE (and the
 * overflow/get-time failure paths) leave the deadline at
 * {UINT32_MAX, UINT32_MAX}, which tee_ta_session_is_cancelled()
 * treats as "never".
 */
333 static void set_invoke_timeout(struct tee_ta_session *sess,
334 uint32_t cancel_req_to)
336 TEE_Time current_time;
337 TEE_Time cancel_time = { UINT32_MAX, UINT32_MAX };
339 if (cancel_req_to == TEE_TIMEOUT_INFINITE)
342 if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
345 /* Check that it doesn't wrap */
346 if (current_time.seconds + (cancel_req_to / 1000) >=
347 current_time.seconds) {
348 cancel_time.seconds =
349 current_time.seconds + cancel_req_to / 1000;
350 cancel_time.millis = current_time.millis + cancel_req_to % 1000;
/*
 * NOTE(review): millis == 1000 exactly is left un-normalized by the
 * strict '>' test below — confirm whether '>= 1000' was intended.
 */
351 if (cancel_time.millis > 1000) {
352 cancel_time.seconds++;
353 cancel_time.millis -= 1000;
358 sess->cancel_time = cancel_time;
361 /*-----------------------------------------------------------------------------
362 * Close a Trusted Application and free available resources
363 *---------------------------------------------------------------------------*/
/*
 * Close session @csess: validate the caller against the session owner,
 * run the TA's close-session entry (unless it has panicked), unlink the
 * session, and destroy the whole TA context when its last session goes
 * away — unless the TA is flagged keep-alive.
 * Returns TEE_ERROR_ITEM_NOT_FOUND if the session cannot be looked up,
 * TEE_ERROR_BAD_PARAMETERS (deliberately generic) on a client mismatch.
 */
364 TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
365 struct tee_ta_session_head *open_sessions,
366 const TEE_Identity *clnt_id)
368 struct tee_ta_session *sess;
369 struct tee_ta_ctx *ctx;
371 DMSG("tee_ta_close_session(0x%" PRIxVA ")", (vaddr_t)csess);
374 return TEE_ERROR_ITEM_NOT_FOUND;
/* Re-acquire the session exclusively through the id lookup path */
376 sess = tee_ta_get_session((vaddr_t)csess, true, open_sessions);
379 EMSG("session 0x%" PRIxVA " to be removed is not found",
381 return TEE_ERROR_ITEM_NOT_FOUND;
384 if (check_client(sess, clnt_id) != TEE_SUCCESS) {
385 tee_ta_put_session(sess);
386 return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
390 DMSG(" ... Destroy session");
392 tee_ta_set_busy(ctx);
/* A panicked TA gets no further entry calls */
394 if (!ctx->panicked) {
395 set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
396 ctx->ops->enter_close_session(sess);
399 tee_ta_unlink_session(sess, open_sessions);
400 #if defined(CFG_TA_GPROF_SUPPORT)
405 tee_ta_clear_busy(ctx);
407 mutex_lock(&tee_ta_mutex);
/* Reference accounting sanity (truncated branch presumably panics) */
409 if (ctx->ref_count <= 0)
/* Last session gone and TA not keep-alive: tear the context down */
413 if (!ctx->ref_count && !(ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE)) {
414 DMSG(" ... Destroy TA ctx");
416 TAILQ_REMOVE(&tee_ctxes, ctx, link);
417 mutex_unlock(&tee_ta_mutex);
419 condvar_destroy(&ctx->busy_cv);
422 ctx->ops->destroy(ctx);
424 mutex_unlock(&tee_ta_mutex);
/*
 * Try to attach new session @s to the already-loaded context @ctx.
 * Returns TEE_ERROR_ITEM_NOT_FOUND to make the caller load a fresh
 * instance (multi-instance TA), TEE_ERROR_BUSY when a single-instance,
 * single-session TA already has a live session, TEE_SUCCESS otherwise
 * (the attach itself is in lines not visible in this extract).
 */
429 static TEE_Result tee_ta_init_session_with_context(struct tee_ta_ctx *ctx,
430 struct tee_ta_session *s)
433 * If TA isn't single instance it should be loaded as new
434 * instance instead of doing anything with this instance.
435 * So tell the caller that we didn't find the TA it the
436 * caller will load a new instance.
438 if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
439 return TEE_ERROR_ITEM_NOT_FOUND;
442 * The TA is single instance, if it isn't multi session we
443 * can't create another session unless it's the first
444 * new session towards a keepAlive TA.
447 if (((ctx->flags & TA_FLAG_MULTI_SESSION) == 0) &&
448 !(((ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) != 0) &&
449 (ctx->ref_count == 0)))
450 return TEE_ERROR_BUSY;
452 DMSG(" ... Re-open TA %pUl", (void *)&ctx->uuid);
/*
 * Allocate and initialize a session for TA @uuid, trying in order:
 * an already-loaded context, a pseudo (static) TA, then a user TA.
 * On success the session is left on @open_sessions and returned via
 * @sess; on failure it is removed and (presumably, in truncated lines)
 * freed. @err is set to TEE_ORIGIN_TEE for failures raised here.
 */
460 static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
461 struct tee_ta_session_head *open_sessions,
462 const TEE_UUID *uuid,
463 struct tee_ta_session **sess)
466 struct tee_ta_ctx *ctx;
467 struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));
469 *err = TEE_ORIGIN_TEE;
471 return TEE_ERROR_OUT_OF_MEMORY;
/* Requests start with cancellation masked, per GP TEE semantics */
473 s->cancel_mask = true;
474 condvar_init(&s->refc_cv);
475 condvar_init(&s->lock_cv);
476 s->lock_thread = THREAD_ID_INVALID;
481 * We take the global TA mutex here and hold it while doing
482 * RPC to load the TA. This big critical section should be broken
483 * down into smaller pieces.
487 mutex_lock(&tee_ta_mutex);
488 TAILQ_INSERT_TAIL(open_sessions, s, link);
490 /* Look for already loaded TA */
491 ctx = tee_ta_context_find(uuid);
493 res = tee_ta_init_session_with_context(ctx, s);
/* ITEM_NOT_FOUND means "try the next loader"; anything else is final */
494 if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
498 /* Look for static TA */
499 res = tee_ta_init_pseudo_ta_session(uuid, s);
500 if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
503 /* Look for user TA */
504 res = tee_ta_init_user_ta_session(uuid, s);
507 if (res == TEE_SUCCESS) {
/* Failure path: take the half-initialized session back off the list */
510 TAILQ_REMOVE(open_sessions, s, link);
513 mutex_unlock(&tee_ta_mutex);
/*
 * Open a session towards TA @uuid on behalf of @clnt_id: initialize a
 * session, validate the memref parameters, mark the context busy and
 * call the TA's open-session entry with a @cancel_req_to ms deadline.
 * On panic or error the session is closed again. @err reports the error
 * origin: TEE_ORIGIN_TEE for panic/busy, TEE_ORIGIN_TRUSTED_APP for a
 * "regular" TA-reported error.
 */
517 TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
518 struct tee_ta_session **sess,
519 struct tee_ta_session_head *open_sessions,
520 const TEE_UUID *uuid,
521 const TEE_Identity *clnt_id,
522 uint32_t cancel_req_to,
523 struct tee_ta_param *param)
526 struct tee_ta_session *s = NULL;
527 struct tee_ta_ctx *ctx;
529 bool was_busy = false;
531 res = tee_ta_init_session(err, open_sessions, uuid, &s);
532 if (res != TEE_SUCCESS) {
533 DMSG("init session failed 0x%x", res);
/* Reject SDP memrefs towards TAs lacking the SDP flag */
537 if (!check_params(s, param))
538 return TEE_ERROR_BAD_PARAMETERS;
/* Context already panicked before we could enter it */
543 DMSG("panicked, call tee_ta_close_session()");
544 tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
545 *err = TEE_ORIGIN_TEE;
546 return TEE_ERROR_TARGET_DEAD;
550 /* Save identity of the owner of the session */
551 s->clnt_id = *clnt_id;
553 if (tee_ta_try_set_busy(ctx)) {
554 set_invoke_timeout(s, cancel_req_to);
555 res = ctx->ops->enter_open_session(s, param, err);
556 tee_ta_clear_busy(ctx);
558 /* Deadlock avoided */
559 res = TEE_ERROR_BUSY;
/* Sample the panic flag before dropping our session reference */
563 panicked = ctx->panicked;
565 tee_ta_put_session(s);
566 if (panicked || (res != TEE_SUCCESS))
567 tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
570 * Origin error equal to TEE_ORIGIN_TRUSTED_APP for "regular" error,
571 * apart from panicking.
573 if (panicked || was_busy)
574 *err = TEE_ORIGIN_TEE;
576 *err = TEE_ORIGIN_TRUSTED_APP;
578 if (res != TEE_SUCCESS)
579 EMSG("Failed. Return error 0x%x", res);
/*
 * Invoke command @cmd in an open session: verify the caller matches the
 * session owner, vet the memref parameters, mark the context busy, arm
 * the cancellation deadline and call the TA's invoke entry. A panicked
 * TA (before or after the call) yields TEE_ERROR_TARGET_DEAD with
 * origin TEE_ORIGIN_TEE.
 */
584 TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
585 struct tee_ta_session *sess,
586 const TEE_Identity *clnt_id,
587 uint32_t cancel_req_to, uint32_t cmd,
588 struct tee_ta_param *param)
592 if (check_client(sess, clnt_id) != TEE_SUCCESS)
593 return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
595 if (!check_params(sess, param))
596 return TEE_ERROR_BAD_PARAMETERS;
598 if (sess->ctx->panicked) {
600 *err = TEE_ORIGIN_TEE;
601 return TEE_ERROR_TARGET_DEAD;
604 tee_ta_set_busy(sess->ctx);
606 set_invoke_timeout(sess, cancel_req_to);
607 res = sess->ctx->ops->enter_invoke_cmd(sess, cmd, param, err);
/* The TA may have panicked while servicing the command */
609 if (sess->ctx->panicked) {
610 *err = TEE_ORIGIN_TEE;
611 res = TEE_ERROR_TARGET_DEAD;
614 tee_ta_clear_busy(sess->ctx);
615 if (res != TEE_SUCCESS)
616 DMSG(" => Error: %x of %d\n", res, *err);
/*
 * Request cancellation of the ongoing command in @sess. Only the client
 * that owns the session (or the kernel identity) may cancel; the flag
 * set itself is in lines not visible in this extract.
 */
620 TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
621 struct tee_ta_session *sess,
622 const TEE_Identity *clnt_id)
624 *err = TEE_ORIGIN_TEE;
626 if (check_client(sess, clnt_id) != TEE_SUCCESS)
627 return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
/*
 * Return true when session @s has passed its cancellation deadline.
 * A deadline of seconds == UINT32_MAX means "never" (set by
 * set_invoke_timeout() for TEE_TIMEOUT_INFINITE). @curr_time lets the
 * caller supply a pre-read time to avoid a redundant system time read.
 */
633 bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
635 TEE_Time current_time;
643 if (s->cancel_time.seconds == UINT32_MAX)
646 if (curr_time != NULL)
647 current_time = *curr_time;
648 else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
/* Deadline reached: compare (seconds, millis) lexicographically */
651 if (current_time.seconds > s->cancel_time.seconds ||
652 (current_time.seconds == s->cancel_time.seconds &&
653 current_time.millis >= s->cancel_time.millis)) {
/*
 * Recompute the MMU context for the current thread from the top of its
 * session stack. Pseudo-TA sessions carry no user mapping, so the first
 * non-pseudo session below the top (if any) decides the context.
 * Ends with a consistency check: a user-TA ctx with an mmu must have an
 * active user mapping, and vice versa.
 */
660 static void update_current_ctx(struct thread_specific_data *tsd)
662 struct tee_ta_ctx *ctx = NULL;
663 struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);
/* Skip a pseudo-TA at the top; it runs with the caller's mapping */
666 if (is_pseudo_ta_ctx(s->ctx))
667 s = TAILQ_NEXT(s, link_tsd);
674 tee_mmu_set_ctx(ctx);
676 * If ctx->mmu == NULL we must not have user mapping active,
677 * if ctx->mmu != NULL we must have user mapping active.
679 if (((ctx && is_user_ta_ctx(ctx) ?
680 to_user_ta_ctx(ctx)->mmu : NULL) == NULL) ==
681 core_mmu_user_mapping_is_active())
682 panic("unexpected active mapping");
/*
 * Push @sess on the current thread's session stack and switch the MMU
 * context accordingly.
 */
685 void tee_ta_push_current_session(struct tee_ta_session *sess)
687 struct thread_specific_data *tsd = thread_get_tsd();
689 TAILQ_INSERT_HEAD(&tsd->sess_stack, sess, link_tsd);
690 update_current_ctx(tsd);
/*
 * Pop the top session off the current thread's session stack, restore
 * the MMU context of the session below it, and return the popped
 * session (return statement in lines not visible in this extract).
 */
693 struct tee_ta_session *tee_ta_pop_current_session(void)
695 struct thread_specific_data *tsd = thread_get_tsd();
696 struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);
699 TAILQ_REMOVE(&tsd->sess_stack, s, link_tsd);
700 update_current_ctx(tsd);
/*
 * Return the session at the top of the current thread's session stack
 * via @sess, or TEE_ERROR_BAD_STATE when the stack is empty (no TA is
 * currently executing on this thread).
 */
705 TEE_Result tee_ta_get_current_session(struct tee_ta_session **sess)
707 struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);
710 return TEE_ERROR_BAD_STATE;
/*
 * Return the session that called the currently executing TA, i.e. the
 * second entry on this thread's session stack (NULL handling and the
 * return are in lines not visible in this extract).
 */
715 struct tee_ta_session *tee_ta_get_calling_session(void)
717 struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);
720 s = TAILQ_NEXT(s, link_tsd);
/*
 * Copy the client identity of the current session into @id.
 * Fails with the current-session error when no TA is running, or
 * TEE_ERROR_BAD_PARAMETERS (per the visible return) presumably when
 * @id is NULL — the guard itself is in lines not visible here.
 */
724 TEE_Result tee_ta_get_client_id(TEE_Identity *id)
727 struct tee_ta_session *sess;
729 res = tee_ta_get_current_session(&sess);
730 if (res != TEE_SUCCESS)
734 return TEE_ERROR_BAD_PARAMETERS;
741 * dump_state - Display TA state as an error log.
/*
 * Logs the TA's UUID and context pointer, marks it "(active)" when it
 * is the current session's context, then delegates to the context's
 * own dump_state operation.
 */
743 static void dump_state(struct tee_ta_ctx *ctx)
745 struct tee_ta_session *s = NULL;
746 bool active __maybe_unused;
748 active = ((tee_ta_get_current_session(&s) == TEE_SUCCESS) &&
751 EMSG_RAW("Status of TA %pUl (%p) %s", (void *)&ctx->uuid, (void *)ctx,
752 active ? "(active)" : "");
753 ctx->ops->dump_state(ctx);
/*
 * Dump the state of the TA currently running on this thread, or log an
 * error when no session is active.
 */
756 void tee_ta_dump_current(void)
758 struct tee_ta_session *s = NULL;
760 if (tee_ta_get_current_session(&s) != TEE_SUCCESS) {
761 EMSG("no valid session found, cannot log TA status");
768 #if defined(CFG_TA_GPROF_SUPPORT)
/*
 * gprof PC sampling hook: record one program-counter sample for the
 * current session into its sample buffer. The index formula matches
 * gprof's histogram scaling: ((pc - offset)/2 * scale)/65536.
 * Out-of-range PCs are silently dropped by the idx bound check.
 */
769 void tee_ta_gprof_sample_pc(vaddr_t pc)
771 struct tee_ta_session *s;
772 struct sample_buf *sbuf;
775 if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
778 if (!sbuf || !sbuf->enabled)
779 return; /* PC sampling is not enabled */
781 idx = (((uint64_t)pc - sbuf->offset)/2 * sbuf->scale)/65536;
782 if (idx < sbuf->nsamples)
783 sbuf->samples[idx]++;
788 * Update user-mode CPU time for the current session
789 * @suspend: true if session is being suspended (leaving user mode), false if
790 * it is resumed (entering user mode)
/*
 * Accounting for gprof: on suspend, add the elapsed time since entering
 * user mode to sbuf->usr and clear the entry stamp; on resume, record
 * the entry stamp (offset by one since 0 is the "not entered" marker).
 * The time source read into 'now' is in lines not visible here.
 */
792 static void tee_ta_update_session_utime(bool suspend)
794 struct tee_ta_session *s;
795 struct sample_buf *sbuf;
798 if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
/* Leaving user mode: must have a matching entry stamp */
805 assert(sbuf->usr_entered);
806 sbuf->usr += now - sbuf->usr_entered;
807 sbuf->usr_entered = 0;
/* Entering user mode: must not already be entered */
809 assert(!sbuf->usr_entered);
811 now++; /* 0 is reserved */
812 sbuf->usr_entered = now;
/* Session is leaving user mode: bank the elapsed user-mode time. */
816 void tee_ta_update_session_utime_suspend(void)
818 tee_ta_update_session_utime(true);
/* Session is entering user mode: start the user-mode time stamp. */
821 void tee_ta_update_session_utime_resume(void)
823 tee_ta_update_session_utime(false);