/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2015-2017 Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <assert.h>
#include <compiler.h>
#include <keep.h>
#include <kernel/panic.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/user_ta.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/pgt_cache.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu.h>
#include <mm/tee_pager.h>
#include <optee_msg_supplicant.h>
#include <signed_hdr.h>
#include <stdlib.h>
#include <string.h>
#include <ta_pub_key.h>
#include <tee/tee_cryp_provider.h>
#include <tee/tee_cryp_utl.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc.h>
#include <tee/tee_svc_storage.h>
#include <tee/uuid.h>
#include <trace.h>
#include <types_ext.h>
#include <utee_defines.h>
#include <util.h>

#include "elf_load.h"
#include "elf_common.h"
#define STACK_ALIGNMENT		(sizeof(long) * 2)

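/*
 * Copy the signed header out of the (non-secure) shared buffer into secure
 * memory before it is parsed, so normal world cannot change it after the
 * checks below (TOCTOU).
 */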
static TEE_Result load_header(const struct shdr *signed_ta,
		struct shdr **sec_shdr)
{
	size_t s;

	if (!tee_vbuf_is_non_sec(signed_ta, sizeof(*signed_ta)))
		return TEE_ERROR_SECURITY;

	s = SHDR_GET_SIZE(signed_ta);
	if (!tee_vbuf_is_non_sec(signed_ta, s))
		return TEE_ERROR_SECURITY;

	/* Copy signed header into secure memory */
	*sec_shdr = malloc(s);
	if (!*sec_shdr)
		return TEE_ERROR_OUT_OF_MEMORY;
	memcpy(*sec_shdr, signed_ta, s);

	return TEE_SUCCESS;
}

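/*
 * Sanity check the signed header and verify its RSA signature over the
 * image hash against the public key built into this core image
 * (ta_pub_key_exponent/ta_pub_key_modulus).
 */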
static TEE_Result check_shdr(struct shdr *shdr)
{
	struct rsa_public_key key;
	TEE_Result res;
	uint32_t e = TEE_U32_TO_BIG_ENDIAN(ta_pub_key_exponent);
	size_t hash_size;

	if (shdr->magic != SHDR_MAGIC || shdr->img_type != SHDR_TA)
		return TEE_ERROR_SECURITY;

	if (TEE_ALG_GET_MAIN_ALG(shdr->algo) != TEE_MAIN_ALGO_RSA)
		return TEE_ERROR_SECURITY;

	res = tee_hash_get_digest_size(TEE_DIGEST_HASH_TO_ALGO(shdr->algo),
				       &hash_size);
	if (res != TEE_SUCCESS)
		return res;
	if (hash_size != shdr->hash_size)
		return TEE_ERROR_SECURITY;

	if (!crypto_ops.acipher.alloc_rsa_public_key ||
	    !crypto_ops.acipher.free_rsa_public_key ||
	    !crypto_ops.acipher.rsassa_verify ||
	    !crypto_ops.bignum.bin2bn)
		return TEE_ERROR_NOT_SUPPORTED;

	res = crypto_ops.acipher.alloc_rsa_public_key(&key, shdr->sig_size);
	if (res != TEE_SUCCESS)
		return res;

	res = crypto_ops.bignum.bin2bn((uint8_t *)&e, sizeof(e), key.e);
	if (res != TEE_SUCCESS)
		goto out;
	res = crypto_ops.bignum.bin2bn(ta_pub_key_modulus,
				       ta_pub_key_modulus_size, key.n);
	if (res != TEE_SUCCESS)
		goto out;

	res = crypto_ops.acipher.rsassa_verify(shdr->algo, &key, -1,
				SHDR_GET_HASH(shdr), shdr->hash_size,
				SHDR_GET_SIG(shdr), shdr->sig_size);
out:
	crypto_ops.acipher.free_rsa_public_key(&key);
	if (res != TEE_SUCCESS)
		return TEE_ERROR_SECURITY;
	return TEE_SUCCESS;
}

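/*
 * Translate ELF segment flags (PF_R/PF_W/PF_X) into core memory
 * attributes. During the initial load pass everything is mapped
 * privileged read/write so the core itself can copy the segments in;
 * user-mode permissions are applied in a second pass once loading and
 * hash verification are done.
 */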
static uint32_t elf_flags_to_mattr(uint32_t flags, bool init_attrs)
{
	uint32_t mattr = 0;

	if (init_attrs)
		mattr = TEE_MATTR_PRW;
	else {
		if (flags & PF_X)
			mattr |= TEE_MATTR_UX;
		if (flags & PF_W)
			mattr |= TEE_MATTR_UW;
		if (flags & PF_R)
			mattr |= TEE_MATTR_UR;
	}

	return mattr;
}

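/*
 * With CFG_PAGED_USER_TA the TA regions are demand-paged, so each region
 * must be registered with tee_pager and later given its final attributes
 * there. Without paging the regions are mapped directly and only cache
 * maintenance is needed before first execution.
 */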
#ifdef CFG_PAGED_USER_TA
static TEE_Result config_initial_paging(struct user_ta_ctx *utc)
{
	size_t n;

	for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
		if (!utc->mmu->regions[n].size)
			continue;
		if (!tee_pager_add_uta_area(utc, utc->mmu->regions[n].va,
					    utc->mmu->regions[n].size))
			return TEE_ERROR_GENERIC;
	}
	return TEE_SUCCESS;
}

static TEE_Result config_final_paging(struct user_ta_ctx *utc)
{
	size_t n;
	uint32_t flags;

	tee_pager_assign_uta_tables(utc);

	for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
		if (!utc->mmu->regions[n].size)
			continue;
		flags = utc->mmu->regions[n].attr &
			(TEE_MATTR_PRW | TEE_MATTR_URWX);
		if (!tee_pager_set_uta_area_attr(utc, utc->mmu->regions[n].va,
						 utc->mmu->regions[n].size,
						 flags))
			return TEE_ERROR_GENERIC;
	}
	return TEE_SUCCESS;
}
#else /*!CFG_PAGED_USER_TA*/
static TEE_Result config_initial_paging(struct user_ta_ctx *utc __unused)
{
	return TEE_SUCCESS;
}

static TEE_Result config_final_paging(struct user_ta_ctx *utc)
{
	void *va = (void *)utc->mmu->ta_private_vmem_start;
	size_t vasize = utc->mmu->ta_private_vmem_end -
			utc->mmu->ta_private_vmem_start;

	cache_maintenance_l1(DCACHE_AREA_CLEAN, va, vasize);
	cache_maintenance_l1(ICACHE_AREA_INVALIDATE, va, vasize);
	return TEE_SUCCESS;
}
#endif /*!CFG_PAGED_USER_TA*/

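/*
 * (Re)build the TA mapping: first the stack region, then one region per
 * ELF segment. Called twice: once with init_attrs for the load-time
 * mapping and once more with the final run-time attributes.
 */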
static TEE_Result load_elf_segments(struct user_ta_ctx *utc,
			struct elf_load_state *elf_state, bool init_attrs)
{
	TEE_Result res;
	uint32_t mattr;
	size_t idx = 0;

	tee_mmu_map_clear(utc);

	/*
	 * Add stack segment
	 */
	tee_mmu_map_stack(utc, utc->mobj_stack);

	/*
	 * Add code segment
	 */
	while (true) {
		vaddr_t offs;
		size_t size;
		uint32_t flags;

		res = elf_load_get_next_segment(elf_state, &idx, &offs, &size,
						&flags);
		if (res == TEE_ERROR_ITEM_NOT_FOUND)
			break;
		if (res != TEE_SUCCESS)
			return res;

		mattr = elf_flags_to_mattr(flags, init_attrs);
		res = tee_mmu_map_add_segment(utc, utc->mobj_code, offs, size,
					      mattr);
		if (res != TEE_SUCCESS)
			return res;
	}

	if (init_attrs)
		return config_initial_paging(utc);
	else
		return config_final_paging(utc);
}

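/*
 * Allocate backing memory for TA code or stack: pager-backed when the TA
 * is demand-paged, otherwise straight from the secure DDR pool.
 */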
static struct mobj *alloc_ta_mem(size_t size)
{
#ifdef CFG_PAGED_USER_TA
	return mobj_paged_alloc(size);
#else
	return mobj_mm_alloc(mobj_sec_ddr, size, &tee_mm_sec_ddr);
#endif
}

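/*
 * Load and authenticate the ELF image: every byte consumed from normal
 * world memory is fed into a hash context, and the resulting digest is
 * compared against the (already signature-verified) hash in the signed
 * header. The image is accepted only if the digests match.
 */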
static TEE_Result load_elf(struct user_ta_ctx *utc, struct shdr *shdr,
			const struct shdr *nmem_shdr)
{
	TEE_Result res;
	size_t hash_ctx_size;
	void *hash_ctx = NULL;
	uint32_t hash_algo;
	uint8_t *nwdata = (uint8_t *)nmem_shdr + SHDR_GET_SIZE(shdr);
	size_t nwdata_len = shdr->img_size;
	void *digest = NULL;
	struct elf_load_state *elf_state = NULL;
	struct ta_head *ta_head;
	void *p;
	size_t vasize;

	if (!tee_vbuf_is_non_sec(nwdata, nwdata_len))
		return TEE_ERROR_SECURITY;

	if (!crypto_ops.hash.get_ctx_size || !crypto_ops.hash.init ||
	    !crypto_ops.hash.update || !crypto_ops.hash.final) {
		res = TEE_ERROR_NOT_IMPLEMENTED;
		goto out;
	}
	hash_algo = TEE_DIGEST_HASH_TO_ALGO(shdr->algo);
	res = crypto_ops.hash.get_ctx_size(hash_algo, &hash_ctx_size);
	if (res != TEE_SUCCESS)
		goto out;
	hash_ctx = malloc(hash_ctx_size);
	if (!hash_ctx) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out;
	}
	res = crypto_ops.hash.init(hash_ctx, hash_algo);
	if (res != TEE_SUCCESS)
		goto out;
	res = crypto_ops.hash.update(hash_ctx, hash_algo,
				     (uint8_t *)shdr, sizeof(struct shdr));
	if (res != TEE_SUCCESS)
		goto out;

	res = elf_load_init(hash_ctx, hash_algo, nwdata, nwdata_len,
			    &elf_state);
	if (res != TEE_SUCCESS)
		goto out;

	res = elf_load_head(elf_state, sizeof(struct ta_head), &p, &vasize,
			    &utc->is_32bit);
	if (res != TEE_SUCCESS)
		goto out;
	ta_head = p;

	utc->mobj_code = alloc_ta_mem(vasize);
	if (!utc->mobj_code) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out;
	}

	/* Currently all TAs must execute from DDR */
	if (!(ta_head->flags & TA_FLAG_EXEC_DDR)) {
		res = TEE_ERROR_BAD_FORMAT;
		goto out;
	}
	/* Temporary assignment to setup memory mapping */
	utc->ctx.flags = TA_FLAG_USER_MODE | TA_FLAG_EXEC_DDR;

	/* Ensure proper alignment of stack */
	utc->mobj_stack = alloc_ta_mem(ROUNDUP(ta_head->stack_size,
					       STACK_ALIGNMENT));
	if (!utc->mobj_stack) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out;
	}

	/*
	 * Map physical memory into TA virtual memory
	 */
	res = tee_mmu_init(utc);
	if (res != TEE_SUCCESS)
		goto out;

	res = load_elf_segments(utc, elf_state, true /* init attrs */);
	if (res != TEE_SUCCESS)
		goto out;

	tee_mmu_set_ctx(&utc->ctx);

	res = elf_load_body(elf_state, tee_mmu_get_load_addr(&utc->ctx));
	if (res != TEE_SUCCESS)
		goto out;

	digest = malloc(shdr->hash_size);
	if (!digest) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out;
	}

	res = crypto_ops.hash.final(hash_ctx, hash_algo, digest,
				    shdr->hash_size);
	if (res != TEE_SUCCESS)
		goto out;

	if (memcmp(digest, SHDR_GET_HASH(shdr), shdr->hash_size) != 0) {
		res = TEE_ERROR_SECURITY;
		goto out;
	}

	/*
	 * Replace the init attributes with attributes used when the TA is
	 * running.
	 */
	res = load_elf_segments(utc, elf_state, false /* final attrs */);
	if (res != TEE_SUCCESS)
		goto out;

out:
	elf_load_final(elf_state);
	free(digest);
	free(hash_ctx);
	return res;
}

/*-----------------------------------------------------------------------------
 * Loads TA header and hashes.
 * Verifies the TA signature.
 * Returns context ptr and TEE_Result.
 *---------------------------------------------------------------------------*/
static TEE_Result ta_load(const TEE_UUID *uuid, const struct shdr *signed_ta,
			struct tee_ta_ctx **ta_ctx)
{
	TEE_Result res;
	/* man_flags: mandatory flags */
	uint32_t man_flags = TA_FLAG_USER_MODE | TA_FLAG_EXEC_DDR;
	/* opt_flags: optional flags */
	uint32_t opt_flags = man_flags | TA_FLAG_SINGLE_INSTANCE |
	    TA_FLAG_MULTI_SESSION | TA_FLAG_UNSAFE_NW_PARAMS |
	    TA_FLAG_INSTANCE_KEEP_ALIVE | TA_FLAG_CACHE_MAINTENANCE;
	struct user_ta_ctx *utc = NULL;
	struct shdr *sec_shdr = NULL;
	struct ta_head *ta_head;

	res = load_header(signed_ta, &sec_shdr);
	if (res != TEE_SUCCESS)
		goto error_return;

	res = check_shdr(sec_shdr);
	if (res != TEE_SUCCESS)
		goto error_return;

	/*
	 * ------------------------------------------------------------------
	 * 2nd step: Register context
	 * Alloc and init the ta context structure, alloc physical/virtual
	 * memories to store/map the TA.
	 * ------------------------------------------------------------------
	 */

	/*
	 * Register context
	 */

	/* code below must be protected by mutex (multi-threaded) */
	utc = calloc(1, sizeof(struct user_ta_ctx));
	if (!utc) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto error_return;
	}
	TAILQ_INIT(&utc->open_sessions);
	TAILQ_INIT(&utc->cryp_states);
	TAILQ_INIT(&utc->objects);
	TAILQ_INIT(&utc->storage_enums);
#if defined(CFG_SE_API)
	utc->se_service = NULL;
#endif

	res = load_elf(utc, sec_shdr, signed_ta);
	if (res != TEE_SUCCESS)
		goto error_return;

	utc->load_addr = tee_mmu_get_load_addr(&utc->ctx);
	ta_head = (struct ta_head *)(vaddr_t)utc->load_addr;

	if (memcmp(&ta_head->uuid, uuid, sizeof(TEE_UUID)) != 0) {
		res = TEE_ERROR_SECURITY;
		goto error_return;
	}

	/* check input flags bitmask consistency and save flags */
	if ((ta_head->flags & opt_flags) != ta_head->flags ||
	    (ta_head->flags & man_flags) != man_flags) {
		EMSG("TA flag issue: flags=%x opt=%X man=%X",
		     ta_head->flags, opt_flags, man_flags);
		res = TEE_ERROR_BAD_FORMAT;
		goto error_return;
	}

	utc->ctx.flags = ta_head->flags;
	utc->ctx.uuid = ta_head->uuid;
	utc->entry_func = ta_head->entry.ptr64;

	utc->ctx.ref_count = 1;

	condvar_init(&utc->ctx.busy_cv);
	TAILQ_INSERT_TAIL(&tee_ctxes, &utc->ctx, link);
	*ta_ctx = &utc->ctx;

	DMSG("ELF load address 0x%x", utc->load_addr);

	tee_mmu_set_ctx(NULL);
	/* end thread protection (multi-threaded) */

	free(sec_shdr);
	return TEE_SUCCESS;

error_return:
	free(sec_shdr);
	tee_mmu_set_ctx(NULL);
	if (utc) {
		pgt_flush_ctx(&utc->ctx);
		tee_pager_rem_uta_areas(utc);
		tee_mmu_final(utc);
		mobj_free(utc->mobj_code);
		mobj_free(utc->mobj_stack);
		free(utc);
	}
	return res;
}

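/*
 * Marshal the kernel-side parameters into the utee_params structure that
 * lives at the top of the user stack: memrefs are passed as the already
 * mapped user virtual address plus size, values as their a/b pair.
 */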
static void init_utee_param(struct utee_params *up,
			const struct tee_ta_param *p, void *va[TEE_NUM_PARAMS])
{
	size_t n;

	up->types = p->types;
	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uintptr_t a;
		uintptr_t b;

		switch (TEE_PARAM_TYPE_GET(p->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_INPUT:
		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			a = (uintptr_t)va[n];
			b = p->u[n].mem.size;
			break;
		case TEE_PARAM_TYPE_VALUE_INPUT:
		case TEE_PARAM_TYPE_VALUE_INOUT:
			a = p->u[n].val.a;
			b = p->u[n].val.b;
			break;
		default:
			a = 0;
			b = 0;
			break;
		}
		/* See comment for struct utee_params in utee_types.h */
		up->vals[n * 2] = a;
		up->vals[n * 2 + 1] = b;
	}
}

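/*
 * Copy results back from the user-mode utee_params after the TA has
 * returned; only OUTPUT/INOUT parameters may be updated by the TA.
 */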
static void update_from_utee_param(struct tee_ta_param *p,
			const struct utee_params *up)
{
	size_t n;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		switch (TEE_PARAM_TYPE_GET(p->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			/* See comment for struct utee_params in utee_types.h */
			p->u[n].mem.size = up->vals[n * 2 + 1];
			break;
		case TEE_PARAM_TYPE_VALUE_OUTPUT:
		case TEE_PARAM_TYPE_VALUE_INOUT:
			/* See comment for struct utee_params in utee_types.h */
			p->u[n].val.a = up->vals[n * 2];
			p->u[n].val.b = up->vals[n * 2 + 1];
			break;
		default:
			break;
		}
	}
}

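/*
 * Scrub the lazily saved VFP state after the TA has run so that no
 * user-mode register contents leak into another context; compiled out
 * when the core is built without VFP support.
 */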
static void clear_vfp_state(struct user_ta_ctx *utc __unused)
{
#ifdef CFG_WITH_VFP
	thread_user_clear_vfp(&utc->vfp);
#endif
}

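/*
 * Common entry path for open-session, invoke-command and close-session:
 * map the parameters into the TA address space, place a utee_params block
 * at the top of the user stack and drop to user mode via
 * thread_enter_user_mode().
 */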
static TEE_Result user_ta_enter(TEE_ErrorOrigin *err,
			struct tee_ta_session *session,
			enum utee_entry_func func, uint32_t cmd,
			struct tee_ta_param *param)
{
	TEE_Result res;
	struct utee_params *usr_params;
	uaddr_t usr_stack;
	struct user_ta_ctx *utc = to_user_ta_ctx(session->ctx);
	TEE_ErrorOrigin serr = TEE_ORIGIN_TEE;
	struct tee_ta_session *s __maybe_unused;
	void *param_va[TEE_NUM_PARAMS] = { NULL };

	if (!(utc->ctx.flags & TA_FLAG_EXEC_DDR))
		panic("TA does not exec in DDR");

	/* Map user space memory */
	res = tee_mmu_map_param(utc, param, param_va);
	if (res != TEE_SUCCESS)
		goto cleanup_return;

	/* Switch to user ctx */
	tee_ta_push_current_session(session);

	/* Make room for usr_params at top of stack */
	usr_stack = (uaddr_t)utc->mmu->regions[0].va + utc->mobj_stack->size;
	usr_stack -= ROUNDUP(sizeof(struct utee_params), STACK_ALIGNMENT);
	usr_params = (struct utee_params *)usr_stack;
	init_utee_param(usr_params, param, param_va);

	res = thread_enter_user_mode(func, tee_svc_kaddr_to_uref(session),
				     (vaddr_t)usr_params, cmd, usr_stack,
				     utc->entry_func, utc->is_32bit,
				     &utc->ctx.panicked, &utc->ctx.panic_code);

	clear_vfp_state(utc);
	/*
	 * According to the GP spec the origin should always be set to the
	 * TA after TA execution
	 */
	serr = TEE_ORIGIN_TRUSTED_APP;

	if (utc->ctx.panicked) {
		DMSG("tee_user_ta_enter: TA panicked with code 0x%x\n",
		     utc->ctx.panic_code);
		serr = TEE_ORIGIN_TEE;
		res = TEE_ERROR_TARGET_DEAD;
	}

	/* Copy out value results */
	update_from_utee_param(param, usr_params);

	s = tee_ta_pop_current_session();
	assert(s == session);
cleanup_return:

	/*
	 * Clear the cancel state now that the user TA has returned. The next
	 * time the TA will be invoked will be with a new operation and should
	 * not have an old cancellation pending.
	 */
	session->cancel = false;

	/*
	 * Can't update *err until now since it may point to an address
	 * mapped for the user mode TA.
	 */
	*err = serr;

	return res;
}

/*
 * Load a TA via RPC with the UUID given by the input param uuid. The
 * virtual address of the TA is received in the out parameter ta.
 *
 * Function is not thread safe.
 */
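/*
 * The load is a two-step exchange with tee-supplicant: the first
 * OPTEE_MSG_RPC_CMD_LOAD_TA call passes a zero-sized buffer and only asks
 * for the required size, then a shared-memory payload of that size is
 * allocated and a second call asks the supplicant to copy the TA binary
 * into it.
 */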
static TEE_Result rpc_load(const TEE_UUID *uuid, struct shdr **ta,
			uint64_t *cookie_ta)
{
	TEE_Result res;
	struct optee_msg_param params[2];
	paddr_t phta = 0;
	uint64_t cta = 0;

	if (!uuid || !ta || !cookie_ta)
		return TEE_ERROR_BAD_PARAMETERS;

	memset(params, 0, sizeof(params));
	params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
	tee_uuid_to_octets((void *)&params[0].u.value, uuid);
	params[1].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
	params[1].u.tmem.buf_ptr = 0;
	params[1].u.tmem.size = 0;
	params[1].u.tmem.shm_ref = 0;

	res = thread_rpc_cmd(OPTEE_MSG_RPC_CMD_LOAD_TA, 2, params);
	if (res != TEE_SUCCESS)
		return res;

	thread_rpc_alloc_payload(params[1].u.tmem.size, &phta, &cta);
	if (!phta)
		return TEE_ERROR_OUT_OF_MEMORY;

	*ta = phys_to_virt(phta, MEM_AREA_NSEC_SHM);
	if (!*ta) {
		res = TEE_ERROR_GENERIC;
		goto out;
	}
	*cookie_ta = cta;

	params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
	tee_uuid_to_octets((void *)&params[0].u.value, uuid);
	params[1].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
	params[1].u.tmem.buf_ptr = phta;
	params[1].u.tmem.shm_ref = cta;
	/* Note that params[1].u.tmem.size is already assigned */

	res = thread_rpc_cmd(OPTEE_MSG_RPC_CMD_LOAD_TA, 2, params);
out:
	if (res != TEE_SUCCESS)
		thread_rpc_free_payload(cta);
	return res;
}

static TEE_Result init_session_with_signed_ta(const TEE_UUID *uuid,
				const struct shdr *signed_ta,
				struct tee_ta_session *s)
{
	TEE_Result res;

	DMSG("   Load dynamic TA");
	/* load and verify */
	res = ta_load(uuid, signed_ta, &s->ctx);
	if (res != TEE_SUCCESS)
		return res;

	DMSG("      dyn TA : %pUl", (void *)&s->ctx->uuid);

	return res;
}

static TEE_Result user_ta_enter_open_session(struct tee_ta_session *s,
			struct tee_ta_param *param, TEE_ErrorOrigin *eo)
{
	return user_ta_enter(eo, s, UTEE_ENTRY_FUNC_OPEN_SESSION, 0, param);
}

static TEE_Result user_ta_enter_invoke_cmd(struct tee_ta_session *s,
			uint32_t cmd, struct tee_ta_param *param,
			TEE_ErrorOrigin *eo)
{
	return user_ta_enter(eo, s, UTEE_ENTRY_FUNC_INVOKE_COMMAND, cmd, param);
}

static void user_ta_enter_close_session(struct tee_ta_session *s)
{
	TEE_ErrorOrigin eo;
	struct tee_ta_param param = { 0 };

	user_ta_enter(&eo, s, UTEE_ENTRY_FUNC_CLOSE_SESSION, 0, &param);
}

static void user_ta_dump_state(struct tee_ta_ctx *ctx)
{
	struct user_ta_ctx *utc __maybe_unused = to_user_ta_ctx(ctx);
	size_t n;

	EMSG_RAW("- load addr : 0x%x    ctx-idr: %d",
		 utc->load_addr, utc->context);
	EMSG_RAW("- stack: 0x%" PRIxVA " %zu",
		 utc->mmu->regions[0].va, utc->mobj_stack->size);
	for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
		paddr_t pa = 0;

		if (utc->mmu->regions[n].mobj)
			mobj_get_pa(utc->mmu->regions[n].mobj,
				    utc->mmu->regions[n].offset, 0, &pa);

		EMSG_RAW("sect %zu : va %#" PRIxVA " pa %#" PRIxPA " %#zx",
			 n, utc->mmu->regions[n].va, pa,
			 utc->mmu->regions[n].size);
	}
}
KEEP_PAGER(user_ta_dump_state);

static void user_ta_ctx_destroy(struct tee_ta_ctx *ctx)
{
	struct user_ta_ctx *utc = to_user_ta_ctx(ctx);

	tee_pager_rem_uta_areas(utc);

	/*
	 * Clean all traces of the TA, both RO and RW data.
	 * No L2 cache maintenance to avoid sync problems
	 */
	if (ctx->flags & TA_FLAG_EXEC_DDR) {
		void *va;

		if (utc->mobj_code) {
			va = mobj_get_va(utc->mobj_code, 0);
			if (va) {
				memset(va, 0, utc->mobj_code->size);
				cache_maintenance_l1(DCACHE_AREA_CLEAN, va,
						     utc->mobj_code->size);
			}
		}

		if (utc->mobj_stack) {
			va = mobj_get_va(utc->mobj_stack, 0);
			if (va) {
				memset(va, 0, utc->mobj_stack->size);
				cache_maintenance_l1(DCACHE_AREA_CLEAN, va,
						     utc->mobj_stack->size);
			}
		}
	}

	/*
	 * Close sessions opened by this TA
	 * Note that tee_ta_close_session() removes the item
	 * from the utc->open_sessions list.
	 */
	while (!TAILQ_EMPTY(&utc->open_sessions)) {
		tee_ta_close_session(TAILQ_FIRST(&utc->open_sessions),
				     &utc->open_sessions, KERN_IDENTITY);
	}

	tee_mmu_final(utc);
	mobj_free(utc->mobj_code);
	mobj_free(utc->mobj_stack);

	/* Free cryp states created by this TA */
	tee_svc_cryp_free_states(utc);
	/* Close cryp objects opened by this TA */
	tee_obj_close_all(utc);
	/* Free enums created by this TA */
	tee_svc_storage_close_all_enum(utc);
	free(utc);
}

static uint32_t user_ta_get_instance_id(struct tee_ta_ctx *ctx)
{
	return to_user_ta_ctx(ctx)->context;
}

static const struct tee_ta_ops user_ta_ops __rodata_unpaged = {
	.enter_open_session = user_ta_enter_open_session,
	.enter_invoke_cmd = user_ta_enter_invoke_cmd,
	.enter_close_session = user_ta_enter_close_session,
	.dump_state = user_ta_dump_state,
	.destroy = user_ta_ctx_destroy,
	.get_instance_id = user_ta_get_instance_id,
};

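/*
 * Entry point used by the TA manager to create a session with a user TA:
 * fetch the signed binary from tee-supplicant over RPC, load and verify
 * it into secure memory, then release the shared-memory copy.
 */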
TEE_Result tee_ta_init_user_ta_session(const TEE_UUID *uuid,
			struct tee_ta_session *s)
{
	TEE_Result res;
	struct shdr *ta = NULL;
	uint64_t cookie_ta = 0;

	/* Request TA from tee-supplicant */
	res = rpc_load(uuid, &ta, &cookie_ta);
	if (res != TEE_SUCCESS)
		return res;

	res = init_session_with_signed_ta(uuid, ta, s);
	/*
	 * Free normal world shared memory now that the TA either has been
	 * copied into secure memory or the TA failed to be initialized.
	 */
	thread_rpc_free_payload(cookie_ta);

	if (res == TEE_SUCCESS)
		s->ctx->ops = &user_ta_ops;
	return res;
}