Update from upstream to 2.4.0 version
[platform/core/security/tef-optee_os.git] core/arch/arm/kernel/user_ta.c
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2015-2017 Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <compiler.h>
#include <keep.h>
#include <kernel/panic.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/user_ta.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/pgt_cache.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu.h>
#include <mm/tee_pager.h>
#include <optee_msg_supplicant.h>
#include <signed_hdr.h>
#include <stdlib.h>
#include <ta_pub_key.h>
#include <tee/tee_cryp_provider.h>
#include <tee/tee_cryp_utl.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc.h>
#include <tee/tee_svc_storage.h>
#include <tee/uuid.h>
#include <trace.h>
#include <types_ext.h>
#include <utee_defines.h>
#include <util.h>

#include "elf_load.h"
#include "elf_common.h"

#define STACK_ALIGNMENT   (sizeof(long) * 2)

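/*
 * Copy the signed header from the non-secure source buffer into secure
 * memory before it is parsed, so that later checks operate on a copy the
 * normal world can no longer modify.
 */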
static TEE_Result load_header(const struct shdr *signed_ta,
                struct shdr **sec_shdr)
{
        size_t s;

        if (!tee_vbuf_is_non_sec(signed_ta, sizeof(*signed_ta)))
                return TEE_ERROR_SECURITY;

        s = SHDR_GET_SIZE(signed_ta);
        if (!tee_vbuf_is_non_sec(signed_ta, s))
                return TEE_ERROR_SECURITY;

        /* Copy signed header into secure memory */
        *sec_shdr = malloc(s);
        if (!*sec_shdr)
                return TEE_ERROR_OUT_OF_MEMORY;
        memcpy(*sec_shdr, signed_ta, s);

        return TEE_SUCCESS;
}

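/*
 * Sanity-check the signed header and verify its RSA signature against the
 * public key built into the TEE core (ta_pub_key_*). The signed hash it
 * carries is compared against the loaded image later, in load_elf().
 */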
static TEE_Result check_shdr(struct shdr *shdr)
{
        struct rsa_public_key key;
        TEE_Result res;
        uint32_t e = TEE_U32_TO_BIG_ENDIAN(ta_pub_key_exponent);
        size_t hash_size;

        if (shdr->magic != SHDR_MAGIC || shdr->img_type != SHDR_TA)
                return TEE_ERROR_SECURITY;

        if (TEE_ALG_GET_MAIN_ALG(shdr->algo) != TEE_MAIN_ALGO_RSA)
                return TEE_ERROR_SECURITY;

        res = tee_hash_get_digest_size(TEE_DIGEST_HASH_TO_ALGO(shdr->algo),
                                       &hash_size);
        if (res != TEE_SUCCESS)
                return res;
        if (hash_size != shdr->hash_size)
                return TEE_ERROR_SECURITY;

        if (!crypto_ops.acipher.alloc_rsa_public_key ||
            !crypto_ops.acipher.free_rsa_public_key ||
            !crypto_ops.acipher.rsassa_verify ||
            !crypto_ops.bignum.bin2bn)
                return TEE_ERROR_NOT_SUPPORTED;

        res = crypto_ops.acipher.alloc_rsa_public_key(&key, shdr->sig_size);
        if (res != TEE_SUCCESS)
                return res;

        res = crypto_ops.bignum.bin2bn((uint8_t *)&e, sizeof(e), key.e);
        if (res != TEE_SUCCESS)
                goto out;
        res = crypto_ops.bignum.bin2bn(ta_pub_key_modulus,
                                       ta_pub_key_modulus_size, key.n);
        if (res != TEE_SUCCESS)
                goto out;

        res = crypto_ops.acipher.rsassa_verify(shdr->algo, &key, -1,
                                SHDR_GET_HASH(shdr), shdr->hash_size,
                                SHDR_GET_SIG(shdr), shdr->sig_size);
out:
        crypto_ops.acipher.free_rsa_public_key(&key);
        if (res != TEE_SUCCESS)
                return TEE_ERROR_SECURITY;
        return TEE_SUCCESS;
}

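/*
 * Translate ELF segment flags (PF_R/PF_W/PF_X) into user memory
 * attributes. While the image is still being loaded (init_attrs) the
 * segments are instead mapped privileged read/write so the kernel can
 * write them.
 */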
static uint32_t elf_flags_to_mattr(uint32_t flags, bool init_attrs)
{
        uint32_t mattr = 0;

        if (init_attrs)
                mattr = TEE_MATTR_PRW;
        else {
                if (flags & PF_X)
                        mattr |= TEE_MATTR_UX;
                if (flags & PF_W)
                        mattr |= TEE_MATTR_UW;
                if (flags & PF_R)
                        mattr |= TEE_MATTR_UR;
        }

        return mattr;
}

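/*
 * With CFG_PAGED_USER_TA the TA regions are registered with the pager and
 * get their final attributes applied through it; without paging, it is
 * enough to clean/invalidate the caches for the TA's private memory once
 * the final mapping is in place.
 */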
#ifdef CFG_PAGED_USER_TA
static TEE_Result config_initial_paging(struct user_ta_ctx *utc)
{
        size_t n;

        for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
                if (!utc->mmu->regions[n].size)
                        continue;
                if (!tee_pager_add_uta_area(utc, utc->mmu->regions[n].va,
                                            utc->mmu->regions[n].size))
                        return TEE_ERROR_GENERIC;
        }
        return TEE_SUCCESS;
}

static TEE_Result config_final_paging(struct user_ta_ctx *utc)
{
        size_t n;
        uint32_t flags;

        tee_pager_assign_uta_tables(utc);

        for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
                if (!utc->mmu->regions[n].size)
                        continue;
                flags = utc->mmu->regions[n].attr &
                        (TEE_MATTR_PRW | TEE_MATTR_URWX);
                if (!tee_pager_set_uta_area_attr(utc, utc->mmu->regions[n].va,
                                                 utc->mmu->regions[n].size,
                                                 flags))
                        return TEE_ERROR_GENERIC;
        }
        return TEE_SUCCESS;
}
#else /*!CFG_PAGED_USER_TA*/
static TEE_Result config_initial_paging(struct user_ta_ctx *utc __unused)
{
        return TEE_SUCCESS;
}

static TEE_Result config_final_paging(struct user_ta_ctx *utc)
{
        void *va = (void *)utc->mmu->ta_private_vmem_start;
        size_t vasize = utc->mmu->ta_private_vmem_end -
                        utc->mmu->ta_private_vmem_start;

        cache_op_inner(DCACHE_AREA_CLEAN, va, vasize);
        cache_op_inner(ICACHE_AREA_INVALIDATE, va, vasize);
        return TEE_SUCCESS;
}
#endif /*!CFG_PAGED_USER_TA*/

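/*
 * (Re)build the TA mapping: the stack first, then one entry per ELF
 * segment. load_elf() calls this twice: first with init_attrs set to get
 * a writable mapping for loading, then again to apply the final
 * attributes derived from the ELF flags.
 */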
static TEE_Result load_elf_segments(struct user_ta_ctx *utc,
                        struct elf_load_state *elf_state, bool init_attrs)
{
        TEE_Result res;
        uint32_t mattr;
        size_t idx = 0;

        tee_mmu_map_clear(utc);

        /*
         * Add stack segment
         */
        tee_mmu_map_stack(utc, utc->mobj_stack);

        /*
         * Add code segment
         */
        while (true) {
                vaddr_t offs;
                size_t size;
                uint32_t flags;

                res = elf_load_get_next_segment(elf_state, &idx, &offs, &size,
                                                &flags);
                if (res == TEE_ERROR_ITEM_NOT_FOUND)
                        break;
                if (res != TEE_SUCCESS)
                        return res;

                mattr = elf_flags_to_mattr(flags, init_attrs);
                res = tee_mmu_map_add_segment(utc, utc->mobj_code, offs, size,
                                              mattr);
                if (res != TEE_SUCCESS)
                        return res;
        }

        if (init_attrs)
                return config_initial_paging(utc);
        else
                return config_final_paging(utc);
}

static struct mobj *alloc_ta_mem(size_t size)
{
#ifdef CFG_PAGED_USER_TA
        return mobj_paged_alloc(size);
#else
        return mobj_mm_alloc(mobj_sec_ddr, size, &tee_mm_sec_ddr);
#endif
}

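/*
 * Load the ELF image that follows the signed header: hash the header,
 * parse the image directly from non-secure memory while feeding the same
 * hash, allocate secure memory for code and stack, map it, copy the image
 * in and finally compare the computed digest with the hash carried in the
 * (already signature-verified) header.
 */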
static TEE_Result load_elf(struct user_ta_ctx *utc, struct shdr *shdr,
                        const struct shdr *nmem_shdr)
{
        TEE_Result res;
        size_t hash_ctx_size;
        void *hash_ctx = NULL;
        uint32_t hash_algo;
        uint8_t *nwdata = (uint8_t *)nmem_shdr + SHDR_GET_SIZE(shdr);
        size_t nwdata_len = shdr->img_size;
        void *digest = NULL;
        struct elf_load_state *elf_state = NULL;
        struct ta_head *ta_head;
        void *p;
        size_t vasize;

        if (!tee_vbuf_is_non_sec(nwdata, nwdata_len))
                return TEE_ERROR_SECURITY;

        if (!crypto_ops.hash.get_ctx_size || !crypto_ops.hash.init ||
            !crypto_ops.hash.update || !crypto_ops.hash.final) {
                res = TEE_ERROR_NOT_IMPLEMENTED;
                goto out;
        }
        hash_algo = TEE_DIGEST_HASH_TO_ALGO(shdr->algo);
        res = crypto_ops.hash.get_ctx_size(hash_algo, &hash_ctx_size);
        if (res != TEE_SUCCESS)
                goto out;
        hash_ctx = malloc(hash_ctx_size);
        if (!hash_ctx) {
                res = TEE_ERROR_OUT_OF_MEMORY;
                goto out;
        }
        res = crypto_ops.hash.init(hash_ctx, hash_algo);
        if (res != TEE_SUCCESS)
                goto out;
        res = crypto_ops.hash.update(hash_ctx, hash_algo,
                                     (uint8_t *)shdr, sizeof(struct shdr));
        if (res != TEE_SUCCESS)
                goto out;

        res = elf_load_init(hash_ctx, hash_algo, nwdata, nwdata_len,
                            &elf_state);
        if (res != TEE_SUCCESS)
                goto out;

        res = elf_load_head(elf_state, sizeof(struct ta_head), &p, &vasize,
                            &utc->is_32bit);
        if (res != TEE_SUCCESS)
                goto out;
        ta_head = p;

        utc->mobj_code = alloc_ta_mem(vasize);
        if (!utc->mobj_code) {
                res = TEE_ERROR_OUT_OF_MEMORY;
                goto out;
        }

        /* Currently all TAs must execute from DDR */
        if (!(ta_head->flags & TA_FLAG_EXEC_DDR)) {
                res = TEE_ERROR_BAD_FORMAT;
                goto out;
        }
        /* Temporary assignment to set up the memory mapping */
        utc->ctx.flags = TA_FLAG_USER_MODE | TA_FLAG_EXEC_DDR;

        /* Ensure proper alignment of the stack */
        utc->mobj_stack = alloc_ta_mem(ROUNDUP(ta_head->stack_size,
                                               STACK_ALIGNMENT));
        if (!utc->mobj_stack) {
                res = TEE_ERROR_OUT_OF_MEMORY;
                goto out;
        }

        /*
         * Map physical memory into TA virtual memory
         */

        res = tee_mmu_init(utc);
        if (res != TEE_SUCCESS)
                goto out;

        res = load_elf_segments(utc, elf_state, true /* init attrs */);
        if (res != TEE_SUCCESS)
                goto out;

        tee_mmu_set_ctx(&utc->ctx);

        res = elf_load_body(elf_state, tee_mmu_get_load_addr(&utc->ctx));
        if (res != TEE_SUCCESS)
                goto out;

        digest = malloc(shdr->hash_size);
        if (!digest) {
                res = TEE_ERROR_OUT_OF_MEMORY;
                goto out;
        }

        res = crypto_ops.hash.final(hash_ctx, hash_algo, digest,
                                    shdr->hash_size);
        if (res != TEE_SUCCESS)
                goto out;

        if (memcmp(digest, SHDR_GET_HASH(shdr), shdr->hash_size) != 0) {
                res = TEE_ERROR_SECURITY;
                goto out;
        }

        /*
         * Replace the init attributes with attributes used when the TA is
         * running.
         */
        res = load_elf_segments(utc, elf_state, false /* final attrs */);
        if (res != TEE_SUCCESS)
                goto out;

out:
        elf_load_final(elf_state);
        free(digest);
        free(hash_ctx);
        return res;
}

/*-----------------------------------------------------------------------------
 * Loads TA header and hashes.
 * Verifies the TA signature.
 * Returns context ptr and TEE_Result.
 *---------------------------------------------------------------------------*/
static TEE_Result ta_load(const TEE_UUID *uuid, const struct shdr *signed_ta,
                        struct tee_ta_ctx **ta_ctx)
{
        TEE_Result res;
        /* man_flags: mandatory flags */
        uint32_t man_flags = TA_FLAG_USER_MODE | TA_FLAG_EXEC_DDR;
        /* opt_flags: optional flags */
        uint32_t opt_flags = man_flags | TA_FLAG_SINGLE_INSTANCE |
            TA_FLAG_MULTI_SESSION | TA_FLAG_SECURE_DATA_PATH |
            TA_FLAG_INSTANCE_KEEP_ALIVE | TA_FLAG_CACHE_MAINTENANCE;
        struct user_ta_ctx *utc = NULL;
        struct shdr *sec_shdr = NULL;
        struct ta_head *ta_head;

        res = load_header(signed_ta, &sec_shdr);
        if (res != TEE_SUCCESS)
                goto error_return;

        res = check_shdr(sec_shdr);
        if (res != TEE_SUCCESS)
                goto error_return;

        /*
         * ------------------------------------------------------------------
         * 2nd step: Register context
         * Alloc and init the ta context structure, alloc physical/virtual
         * memories to store/map the TA.
         * ------------------------------------------------------------------
         */

        /*
         * Register context
         */

        /* code below must be protected by mutex (multi-threaded) */
        utc = calloc(1, sizeof(struct user_ta_ctx));
        if (!utc) {
                res = TEE_ERROR_OUT_OF_MEMORY;
                goto error_return;
        }
        TAILQ_INIT(&utc->open_sessions);
        TAILQ_INIT(&utc->cryp_states);
        TAILQ_INIT(&utc->objects);
        TAILQ_INIT(&utc->storage_enums);
#if defined(CFG_SE_API)
        utc->se_service = NULL;
#endif

        res = load_elf(utc, sec_shdr, signed_ta);
        if (res != TEE_SUCCESS)
                goto error_return;

        utc->load_addr = tee_mmu_get_load_addr(&utc->ctx);
        ta_head = (struct ta_head *)(vaddr_t)utc->load_addr;

        if (memcmp(&ta_head->uuid, uuid, sizeof(TEE_UUID)) != 0) {
                res = TEE_ERROR_SECURITY;
                goto error_return;
        }

        /* check input flags bitmask consistency and save flags */
        if ((ta_head->flags & opt_flags) != ta_head->flags ||
            (ta_head->flags & man_flags) != man_flags) {
                EMSG("TA flag issue: flags=%x opt=%X man=%X",
                     ta_head->flags, opt_flags, man_flags);
                res = TEE_ERROR_BAD_FORMAT;
                goto error_return;
        }

        utc->ctx.flags = ta_head->flags;
        utc->ctx.uuid = ta_head->uuid;
        utc->entry_func = ta_head->entry.ptr64;

        utc->ctx.ref_count = 1;

        condvar_init(&utc->ctx.busy_cv);
        TAILQ_INSERT_TAIL(&tee_ctxes, &utc->ctx, link);
        *ta_ctx = &utc->ctx;

        DMSG("ELF load address 0x%x", utc->load_addr);

        tee_mmu_set_ctx(NULL);
        /* end thread protection (multi-threaded) */

        free(sec_shdr);
        return TEE_SUCCESS;

error_return:
        free(sec_shdr);
        tee_mmu_set_ctx(NULL);
        if (utc) {
                pgt_flush_ctx(&utc->ctx);
                tee_pager_rem_uta_areas(utc);
                tee_mmu_final(utc);
                mobj_free(utc->mobj_code);
                mobj_free(utc->mobj_stack);
                free(utc);
        }
        return res;
}

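/*
 * Marshal the kernel-side parameters into the utee_params layout placed
 * on the user stack: memrefs are passed as the mapped user virtual
 * address plus size, values as their a/b pair.
 */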
static void init_utee_param(struct utee_params *up,
                        const struct tee_ta_param *p, void *va[TEE_NUM_PARAMS])
{
        size_t n;

        up->types = p->types;
        for (n = 0; n < TEE_NUM_PARAMS; n++) {
                uintptr_t a;
                uintptr_t b;

                switch (TEE_PARAM_TYPE_GET(p->types, n)) {
                case TEE_PARAM_TYPE_MEMREF_INPUT:
                case TEE_PARAM_TYPE_MEMREF_OUTPUT:
                case TEE_PARAM_TYPE_MEMREF_INOUT:
                        a = (uintptr_t)va[n];
                        b = p->u[n].mem.size;
                        break;
                case TEE_PARAM_TYPE_VALUE_INPUT:
                case TEE_PARAM_TYPE_VALUE_INOUT:
                        a = p->u[n].val.a;
                        b = p->u[n].val.b;
                        break;
                default:
                        a = 0;
                        b = 0;
                        break;
                }
                /* See comment for struct utee_params in utee_types.h */
                up->vals[n * 2] = a;
                up->vals[n * 2 + 1] = b;
        }
}

static void update_from_utee_param(struct tee_ta_param *p,
                        const struct utee_params *up)
{
        size_t n;

        for (n = 0; n < TEE_NUM_PARAMS; n++) {
                switch (TEE_PARAM_TYPE_GET(p->types, n)) {
                case TEE_PARAM_TYPE_MEMREF_OUTPUT:
                case TEE_PARAM_TYPE_MEMREF_INOUT:
                        /* See comment for struct utee_params in utee_types.h */
                        p->u[n].mem.size = up->vals[n * 2 + 1];
                        break;
                case TEE_PARAM_TYPE_VALUE_OUTPUT:
                case TEE_PARAM_TYPE_VALUE_INOUT:
                        /* See comment for struct utee_params in utee_types.h */
                        p->u[n].val.a = up->vals[n * 2];
                        p->u[n].val.b = up->vals[n * 2 + 1];
                        break;
                default:
                        break;
                }
        }
}

static void clear_vfp_state(struct user_ta_ctx *utc __unused)
{
#ifdef CFG_WITH_VFP
        thread_user_clear_vfp(&utc->vfp);
#endif
}

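/*
 * Enter the TA: map the parameters into the TA address space, reserve
 * room for struct utee_params at the top of the user stack, switch to
 * user mode via thread_enter_user_mode() and translate a TA panic into
 * TEE_ERROR_TARGET_DEAD on the way back.
 */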
static TEE_Result user_ta_enter(TEE_ErrorOrigin *err,
                        struct tee_ta_session *session,
                        enum utee_entry_func func, uint32_t cmd,
                        struct tee_ta_param *param)
{
        TEE_Result res;
        struct utee_params *usr_params;
        uaddr_t usr_stack;
        struct user_ta_ctx *utc = to_user_ta_ctx(session->ctx);
        TEE_ErrorOrigin serr = TEE_ORIGIN_TEE;
        struct tee_ta_session *s __maybe_unused;
        void *param_va[TEE_NUM_PARAMS] = { NULL };

        if (!(utc->ctx.flags & TA_FLAG_EXEC_DDR))
                panic("TA does not exec in DDR");

        /* Map user space memory */
        res = tee_mmu_map_param(utc, param, param_va);
        if (res != TEE_SUCCESS)
                goto cleanup_return;

        /* Switch to user ctx */
        tee_ta_push_current_session(session);

        /* Make room for usr_params at top of stack */
        usr_stack = (uaddr_t)utc->mmu->regions[0].va + utc->mobj_stack->size;
        usr_stack -= ROUNDUP(sizeof(struct utee_params), STACK_ALIGNMENT);
        usr_params = (struct utee_params *)usr_stack;
        init_utee_param(usr_params, param, param_va);

        res = thread_enter_user_mode(func, tee_svc_kaddr_to_uref(session),
                                     (vaddr_t)usr_params, cmd, usr_stack,
                                     utc->entry_func, utc->is_32bit,
                                     &utc->ctx.panicked, &utc->ctx.panic_code);

        clear_vfp_state(utc);
        /*
         * According to the GP spec, the origin should always be set to
         * the TA after TA execution
         */
        serr = TEE_ORIGIN_TRUSTED_APP;

        if (utc->ctx.panicked) {
                DMSG("tee_user_ta_enter: TA panicked with code 0x%x\n",
                     utc->ctx.panic_code);
                serr = TEE_ORIGIN_TEE;
                res = TEE_ERROR_TARGET_DEAD;
        }

        /* Copy out value results */
        update_from_utee_param(param, usr_params);

        s = tee_ta_pop_current_session();
        assert(s == session);
cleanup_return:

        /*
         * Clear the cancel state now that the user TA has returned. The next
         * time the TA will be invoked will be with a new operation and should
         * not have an old cancellation pending.
         */
        session->cancel = false;

        /*
         * Can't update *err until now since it may point to an address
         * mapped for the user mode TA.
         */
        *err = serr;

        return res;
}

/*
 * Load a TA via RPC with the UUID given by the input parameter uuid. The
 * virtual address of the TA is received in the out parameter ta.
 *
 * This function is not thread-safe.
 */
static TEE_Result rpc_load(const TEE_UUID *uuid, struct shdr **ta,
                        uint64_t *cookie_ta)
{
        TEE_Result res;
        struct optee_msg_param params[2];
        paddr_t phta = 0;
        uint64_t cta = 0;

        if (!uuid || !ta || !cookie_ta)
                return TEE_ERROR_BAD_PARAMETERS;

        memset(params, 0, sizeof(params));
        params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
        tee_uuid_to_octets((void *)&params[0].u.value, uuid);
        params[1].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
        params[1].u.tmem.buf_ptr = 0;
        params[1].u.tmem.size = 0;
        params[1].u.tmem.shm_ref = 0;

        res = thread_rpc_cmd(OPTEE_MSG_RPC_CMD_LOAD_TA, 2, params);
        if (res != TEE_SUCCESS)
                return res;

        thread_rpc_alloc_payload(params[1].u.tmem.size, &phta, &cta);
        if (!phta)
                return TEE_ERROR_OUT_OF_MEMORY;

        *ta = phys_to_virt(phta, MEM_AREA_NSEC_SHM);
        if (!*ta) {
                res = TEE_ERROR_GENERIC;
                goto out;
        }
        *cookie_ta = cta;

        params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
        tee_uuid_to_octets((void *)&params[0].u.value, uuid);
        params[1].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
        params[1].u.tmem.buf_ptr = phta;
        params[1].u.tmem.shm_ref = cta;
        /* Note that params[1].u.tmem.size is already assigned */

        res = thread_rpc_cmd(OPTEE_MSG_RPC_CMD_LOAD_TA, 2, params);
out:
        if (res != TEE_SUCCESS)
                thread_rpc_free_payload(cta);
        return res;
}

static TEE_Result init_session_with_signed_ta(const TEE_UUID *uuid,
                                const struct shdr *signed_ta,
                                struct tee_ta_session *s)
{
        TEE_Result res;

        DMSG("   Load dynamic TA");
        /* load and verify */
        res = ta_load(uuid, signed_ta, &s->ctx);
        if (res != TEE_SUCCESS)
                return res;

        DMSG("      dyn TA : %pUl", (void *)&s->ctx->uuid);

        return res;
}

static TEE_Result user_ta_enter_open_session(struct tee_ta_session *s,
                        struct tee_ta_param *param, TEE_ErrorOrigin *eo)
{
        return user_ta_enter(eo, s, UTEE_ENTRY_FUNC_OPEN_SESSION, 0, param);
}

static TEE_Result user_ta_enter_invoke_cmd(struct tee_ta_session *s,
                        uint32_t cmd, struct tee_ta_param *param,
                        TEE_ErrorOrigin *eo)
{
        return user_ta_enter(eo, s, UTEE_ENTRY_FUNC_INVOKE_COMMAND, cmd, param);
}

static void user_ta_enter_close_session(struct tee_ta_session *s)
{
        TEE_ErrorOrigin eo;
        struct tee_ta_param param = { 0 };

        user_ta_enter(&eo, s, UTEE_ENTRY_FUNC_CLOSE_SESSION, 0, &param);
}

static void user_ta_dump_state(struct tee_ta_ctx *ctx)
{
        struct user_ta_ctx *utc __maybe_unused = to_user_ta_ctx(ctx);
        size_t n;

        EMSG_RAW("- load addr : 0x%x    ctx-idr: %d",
                 utc->load_addr, utc->context);
        EMSG_RAW("- stack: 0x%" PRIxVA " %zu",
                 utc->mmu->regions[0].va, utc->mobj_stack->size);
        for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
                paddr_t pa = 0;

                if (utc->mmu->regions[n].mobj)
                        mobj_get_pa(utc->mmu->regions[n].mobj,
                                    utc->mmu->regions[n].offset, 0, &pa);

                EMSG_RAW("sect %zu : va %#" PRIxVA " pa %#" PRIxPA " %#zx",
                         n, utc->mmu->regions[n].va, pa,
                         utc->mmu->regions[n].size);
        }
}
KEEP_PAGER(user_ta_dump_state);

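/*
 * Tear down a user TA context: wipe the TA's code and stack, close any
 * sessions the TA has opened and release the remaining kernel resources
 * tied to the context.
 */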
static void user_ta_ctx_destroy(struct tee_ta_ctx *ctx)
{
        struct user_ta_ctx *utc = to_user_ta_ctx(ctx);

        tee_pager_rem_uta_areas(utc);

        /*
         * Clean all traces of the TA, both RO and RW data.
         * No L2 cache maintenance to avoid sync problems
         */
        if (ctx->flags & TA_FLAG_EXEC_DDR) {
                void *va;

                if (utc->mobj_code) {
                        va = mobj_get_va(utc->mobj_code, 0);
                        if (va) {
                                memset(va, 0, utc->mobj_code->size);
                                cache_op_inner(DCACHE_AREA_CLEAN, va,
                                                utc->mobj_code->size);
                        }
                }

                if (utc->mobj_stack) {
                        va = mobj_get_va(utc->mobj_stack, 0);
                        if (va) {
                                memset(va, 0, utc->mobj_stack->size);
                                cache_op_inner(DCACHE_AREA_CLEAN, va,
                                                utc->mobj_stack->size);
                        }
                }
        }

        /*
         * Close sessions opened by this TA
         * Note that tee_ta_close_session() removes the item
         * from the utc->open_sessions list.
         */
        while (!TAILQ_EMPTY(&utc->open_sessions)) {
                tee_ta_close_session(TAILQ_FIRST(&utc->open_sessions),
                                     &utc->open_sessions, KERN_IDENTITY);
        }

        tee_mmu_final(utc);
        mobj_free(utc->mobj_code);
        mobj_free(utc->mobj_stack);

        /* Free cryp states created by this TA */
        tee_svc_cryp_free_states(utc);
        /* Close cryp objects opened by this TA */
        tee_obj_close_all(utc);
        /* Free storage enumerators created by this TA */
        tee_svc_storage_close_all_enum(utc);
        free(utc);
}

static uint32_t user_ta_get_instance_id(struct tee_ta_ctx *ctx)
{
        return to_user_ta_ctx(ctx)->context;
}

static const struct tee_ta_ops user_ta_ops __rodata_unpaged = {
        .enter_open_session = user_ta_enter_open_session,
        .enter_invoke_cmd = user_ta_enter_invoke_cmd,
        .enter_close_session = user_ta_enter_close_session,
        .dump_state = user_ta_dump_state,
        .destroy = user_ta_ctx_destroy,
        .get_instance_id = user_ta_get_instance_id,
};

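/*
 * Fetch the signed TA binary from tee-supplicant over RPC, load and
 * verify it into a new context for the session, then hook up the user TA
 * operations.
 */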
TEE_Result tee_ta_init_user_ta_session(const TEE_UUID *uuid,
                        struct tee_ta_session *s)
{
        TEE_Result res;
        struct shdr *ta = NULL;
        uint64_t cookie_ta = 0;

        /* Request TA from tee-supplicant */
        res = rpc_load(uuid, &ta, &cookie_ta);
        if (res != TEE_SUCCESS)
                return res;

        res = init_session_with_signed_ta(uuid, ta, s);
        /*
         * Free normal world shared memory now that the TA either has been
         * copied into secure memory or the TA failed to be initialized.
         */
        thread_rpc_free_payload(cookie_ta);

        if (res == TEE_SUCCESS)
                s->ctx->ops = &user_ta_ops;
        return res;
}