/*
 * Copyright (c) 2015, Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <assert.h>
#include <compiler.h>
#include <inttypes.h>
#include <keep.h>
#include <kernel/generic_boot.h>
#include <kernel/thread.h>
#include <kernel/panic.h>
#include <kernel/misc.h>
#include <kernel/asan.h>
#include <malloc.h>
#include <mm/core_mmu.h>
#include <mm/core_memprot.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu.h>
#include <mm/tee_pager.h>
#include <sm/tee_mon.h>
#include <stdio.h>
#include <trace.h>
#include <tee/tee_cryp_provider.h>
#include <utee_defines.h>
#include <util.h>

#include <platform_config.h>

#if !defined(CFG_WITH_ARM_TRUSTED_FW)
#include <sm/sm.h>
#endif

#if defined(CFG_WITH_VFP)
#include <kernel/vfp.h>
#endif
/*
 * In this file we're using unsigned long to represent physical pointers as
 * they are received in a single register when OP-TEE is initially entered.
 * This limits 32-bit systems to only make use of the lower 32 bits
 * of a physical address for initial parameters.
 *
 * 64-bit systems on the other hand can use full 64-bit physical pointers.
 */
#define PADDR_INVALID		ULONG_MAX
#if defined(CFG_BOOT_SECONDARY_REQUEST)
/* Normal-world entry address for each secondary core released via spin table */
paddr_t ns_entry_addrs[CFG_TEE_CORE_NB_CORE] __early_bss;
/* Per-core release flag polled in generic_boot_core_hpen() */
static uint32_t spin_table[CFG_TEE_CORE_NB_CORE] __early_bss;
#endif

#ifdef CFG_BOOT_SYNC_CPU
/*
 * Array used when booting, to synchronize cpu.
 * When 0, the cpu has not started.
 * When 1, it has started
 */
uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE] __early_bss;
#endif
89 /* May be overridden in plat-$(PLATFORM)/main.c */
90 __weak void plat_cpu_reset_late(void)
93 KEEP_PAGER(plat_cpu_reset_late);
95 /* May be overridden in plat-$(PLATFORM)/main.c */
96 __weak void plat_cpu_reset_early(void)
99 KEEP_PAGER(plat_cpu_reset_early);
101 /* May be overridden in plat-$(PLATFORM)/main.c */
102 __weak void main_init_gic(void)
106 /* May be overridden in plat-$(PLATFORM)/main.c */
107 __weak void main_secondary_init_gic(void)
111 #if defined(CFG_WITH_ARM_TRUSTED_FW)
112 void init_sec_mon(unsigned long nsec_entry __maybe_unused)
114 assert(nsec_entry == PADDR_INVALID);
115 /* Do nothing as we don't have a secure monitor */
118 /* May be overridden in plat-$(PLATFORM)/main.c */
119 __weak void init_sec_mon(unsigned long nsec_entry)
121 struct sm_nsec_ctx *nsec_ctx;
123 assert(nsec_entry != PADDR_INVALID);
125 /* Initialize secure monitor */
126 nsec_ctx = sm_get_nsec_ctx();
127 nsec_ctx->mon_lr = nsec_entry;
128 nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
133 #if defined(CFG_WITH_ARM_TRUSTED_FW)
134 static void init_vfp_nsec(void)
138 static void init_vfp_nsec(void)
140 /* Normal world can use CP10 and CP11 (SIMD/VFP) */
141 write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
#if defined(CFG_WITH_VFP)

/* NOTE(review): ARM32/ARM64 guards restored from upstream layout — confirm */
#ifdef ARM32
static void init_vfp_sec(void)
{
	uint32_t cpacr = read_cpacr();

	/*
	 * Enable Advanced SIMD functionality.
	 * Enable use of D16-D31 of the Floating-point Extension register
	 * file.
	 */
	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
	/*
	 * Enable usage of CP10 and CP11 (SIMD/VFP) (both kernel and user
	 * mode.
	 */
	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
	write_cpacr(cpacr);
}
#endif /* ARM32 */

#ifdef ARM64
static void init_vfp_sec(void)
{
	/* Not using VFP until thread_kernel_enable_vfp() */
	vfp_disable();
}
#endif /* ARM64 */

#else /* CFG_WITH_VFP */

static void init_vfp_sec(void)
{
	/* Not using VFP */
}
#endif
184 #ifdef CFG_WITH_PAGER
186 static size_t get_block_size(void)
188 struct core_mmu_table_info tbl_info;
191 if (!core_mmu_find_table(CFG_TEE_RAM_START, UINT_MAX, &tbl_info))
192 panic("can't find mmu tables");
194 l = tbl_info.level - 1;
195 if (!core_mmu_find_table(CFG_TEE_RAM_START, l, &tbl_info))
196 panic("can't find mmu table upper level");
198 return 1 << tbl_info.shift;
201 static void init_runtime(unsigned long pageable_part)
204 size_t init_size = (size_t)__init_size;
205 size_t pageable_size = __pageable_end - __pageable_start;
206 size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
207 TEE_SHA256_HASH_SIZE;
209 uint8_t *paged_store;
213 assert(pageable_size % SMALL_PAGE_SIZE == 0);
214 assert(hash_size == (size_t)__tmp_hashes_size);
217 * Zero BSS area. Note that globals that would normally would go
218 * into BSS which are used before this has to be put into .nozi.*
219 * to avoid getting overwritten.
221 memset(__bss_start, 0, __bss_end - __bss_start);
224 * This needs to be initialized early to support address lookup
225 * in MEM_AREA_TEE_RAM
227 if (!core_mmu_find_table(CFG_TEE_RAM_START, UINT_MAX,
228 &tee_pager_tbl_info))
229 panic("can't find mmu tables");
231 if (tee_pager_tbl_info.shift != SMALL_PAGE_SHIFT)
232 panic("Unsupported page size in translation table");
234 thread_init_boot_thread();
236 malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
237 malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
239 hashes = malloc(hash_size);
240 IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
242 memcpy(hashes, __tmp_hashes_start, hash_size);
245 * Need tee_mm_sec_ddr initialized to be able to allocate secure
248 teecore_init_ta_ram();
250 mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
252 paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM);
253 /* Copy init part into pageable area */
254 memcpy(paged_store, __init_start, init_size);
255 /* Copy pageable part after init part into pageable area */
256 memcpy(paged_store + init_size,
257 phys_to_virt(pageable_part,
258 core_mmu_get_type_by_pa(pageable_part)),
259 __pageable_part_end - __pageable_part_start);
261 /* Check that hashes of what's in pageable area is OK */
262 DMSG("Checking hashes of pageable area");
263 for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
264 const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
265 const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
268 DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
269 res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
270 if (res != TEE_SUCCESS) {
271 EMSG("Hash failed for page %zu at %p: res 0x%x",
278 * Copy what's not initialized in the last init page. Needed
279 * because we're not going fault in the init pages again. We can't
280 * fault in pages until we've switched to the new vector by calling
281 * thread_init_handlers() below.
283 if (init_size % SMALL_PAGE_SIZE) {
286 memcpy(__init_start + init_size, paged_store + init_size,
287 SMALL_PAGE_SIZE - (init_size % SMALL_PAGE_SIZE));
289 p = (uint8_t *)(((vaddr_t)__init_start + init_size) &
292 cache_maintenance_l1(DCACHE_AREA_CLEAN, p, SMALL_PAGE_SIZE);
293 cache_maintenance_l1(ICACHE_AREA_INVALIDATE, p,
298 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
299 * is supplied to tee_pager_init() below.
301 block_size = get_block_size();
302 if (!tee_mm_init(&tee_mm_vcore,
303 ROUNDDOWN(CFG_TEE_RAM_START, block_size),
304 ROUNDUP(CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE,
306 SMALL_PAGE_SHIFT, 0))
307 panic("tee_mm_vcore init failed");
310 * Assign alias area for pager end of the small page block the rest
311 * of the binary is loaded into. We're taking more than needed, but
312 * we're guaranteed to not need more than the physical amount of
315 mm = tee_mm_alloc2(&tee_mm_vcore,
316 (vaddr_t)tee_mm_vcore.hi - TZSRAM_SIZE, TZSRAM_SIZE);
321 * Claim virtual memory which isn't paged, note that there migth be
322 * a gap between tee_mm_vcore.lo and TEE_RAM_START which is also
323 * claimed to avoid later allocations to get that memory.
324 * Linear memory (flat map core memory) ends there.
326 mm = tee_mm_alloc2(&tee_mm_vcore, tee_mm_vcore.lo,
327 (vaddr_t)(__pageable_start - tee_mm_vcore.lo));
331 * Allocate virtual memory for the pageable area and let the pager
332 * take charge of all the pages already assigned to that memory.
334 mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
337 if (!tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
338 TEE_MATTR_PRX, paged_store, hashes))
339 panic("failed to add pageable to vcore");
341 tee_pager_add_pages((vaddr_t)__pageable_start,
342 ROUNDUP(init_size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE, false);
343 tee_pager_add_pages((vaddr_t)__pageable_start +
344 ROUNDUP(init_size, SMALL_PAGE_SIZE),
345 (pageable_size - ROUNDUP(init_size, SMALL_PAGE_SIZE)) /
346 SMALL_PAGE_SIZE, true);
351 #ifdef CFG_CORE_SANITIZE_KADDRESS
352 static void init_run_constructors(void)
356 for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
357 ((void (*)(void))(*ctor))();
360 static void init_asan(void)
364 * CFG_ASAN_SHADOW_OFFSET is also supplied as
365 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
366 * Since all the needed values to calculate the value of
367 * CFG_ASAN_SHADOW_OFFSET isn't available in to make we need to
368 * calculate it in advance and hard code it into the platform
369 * conf.mk. Here where we have all the needed values we double
370 * check that the compiler is supplied the correct value.
373 #define __ASAN_SHADOW_START \
374 ROUNDUP(CFG_TEE_RAM_START + (CFG_TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
375 assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
376 #define __CFG_ASAN_SHADOW_OFFSET \
377 (__ASAN_SHADOW_START - (CFG_TEE_RAM_START / 8))
378 COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
379 #undef __ASAN_SHADOW_START
380 #undef __CFG_ASAN_SHADOW_OFFSET
383 * Assign area covered by the shadow area, everything from start up
384 * to the beginning of the shadow area.
386 asan_set_shadowed((void *)CFG_TEE_LOAD_ADDR, &__asan_shadow_start);
389 * Add access to areas that aren't opened automatically by a
392 asan_tag_access(&__initcall_start, &__initcall_end);
393 asan_tag_access(&__ctor_list, &__ctor_end);
394 asan_tag_access(__rodata_start, __rodata_end);
395 asan_tag_access(__early_bss_start, __early_bss_end);
396 asan_tag_access(__nozi_start, __nozi_end);
398 init_run_constructors();
400 /* Everything is tagged correctly, let's start address sanitizing. */
403 #else /*CFG_CORE_SANITIZE_KADDRESS*/
404 static void init_asan(void)
407 #endif /*CFG_CORE_SANITIZE_KADDRESS*/
409 static void init_runtime(unsigned long pageable_part __unused)
412 * Zero BSS area. Note that globals that would normally would go
413 * into BSS which are used before this has to be put into .nozi.*
414 * to avoid getting overwritten.
416 memset(__bss_start, 0, __bss_end - __bss_start);
418 thread_init_boot_thread();
421 malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
424 * Initialized at this stage in the pager version of this function
427 teecore_init_ta_ram();
/*
 * Adds a /firmware/optee node to the device tree so normal world can
 * discover OP-TEE. Returns 0 on success (or if the node already exists),
 * -1 on any libfdt error.
 */
static int add_optee_dt_node(void *fdt)
{
	int offs;
	int ret;

	if (fdt_path_offset(fdt, "/firmware/optee") >= 0) {
		IMSG("OP-TEE Device Tree node already exists!\n");
		return 0;
	}

	offs = fdt_path_offset(fdt, "/firmware");
	if (offs < 0) {
		offs = fdt_path_offset(fdt, "/");
		if (offs < 0)
			return -1;
		offs = fdt_add_subnode(fdt, offs, "firmware");
		if (offs < 0)
			return -1;
	}

	offs = fdt_add_subnode(fdt, offs, "optee");
	if (offs < 0)
		return -1;

	ret = fdt_setprop_string(fdt, offs, "compatible", "linaro,optee-tz");
	if (ret < 0)
		return -1;
	ret = fdt_setprop_string(fdt, offs, "method", "smc");
	if (ret < 0)
		return -1;
	return 0;
}
/*
 * Reads a #address-cells/#size-cells style property at @offs into
 * @cell_size. Returns 0 on success, -1 if the property is malformed or
 * has a value other than 1 or 2.
 */
static int get_dt_cell_size(void *fdt, int offs, const char *cell_name,
			    uint32_t *cell_size)
{
	int len;
	const uint32_t *cell = fdt_getprop(fdt, offs, cell_name, &len);

	if (len != sizeof(*cell))
		return -1;
	*cell_size = fdt32_to_cpu(*cell);
	if (*cell_size != 1 && *cell_size != 2)
		return -1;
	return 0;
}
/*
 * Stores @val at @data in device-tree byte order, as one 32-bit cell when
 * @cell_size is 1 and as two cells (64 bits) otherwise.
 */
static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
{
	if (cell_size == 1) {
		uint32_t v = cpu_to_fdt32((uint32_t)val);

		memcpy(data, &v, sizeof(v));
	} else {
		uint64_t v = cpu_to_fdt64(val);

		memcpy(data, &v, sizeof(v));
	}
}
492 static int add_optee_res_mem_dt_node(void *fdt)
496 uint32_t addr_size = 2;
497 uint32_t len_size = 2;
498 vaddr_t shm_va_start;
501 char subnode_name[80];
503 offs = fdt_path_offset(fdt, "/reserved-memory");
505 ret = get_dt_cell_size(fdt, offs, "#address-cells", &addr_size);
508 ret = get_dt_cell_size(fdt, offs, "#size-cells", &len_size);
512 offs = fdt_path_offset(fdt, "/");
515 offs = fdt_add_subnode(fdt, offs, "reserved-memory");
518 ret = fdt_setprop_cell(fdt, offs, "#address-cells", addr_size);
521 ret = fdt_setprop_cell(fdt, offs, "#size-cells", len_size);
524 ret = fdt_setprop(fdt, offs, "ranges", NULL, 0);
529 core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_va_start, &shm_va_end);
530 shm_pa = virt_to_phys((void *)shm_va_start);
531 snprintf(subnode_name, sizeof(subnode_name),
532 "optee@0x%" PRIxPA, shm_pa);
533 offs = fdt_add_subnode(fdt, offs, subnode_name);
535 uint32_t data[addr_size + len_size] ;
537 set_dt_val(data, addr_size, shm_pa);
538 set_dt_val(data + addr_size, len_size,
539 shm_va_end - shm_va_start);
540 ret = fdt_setprop(fdt, offs, "reg", data, sizeof(data));
543 ret = fdt_setprop(fdt, offs, "no-map", NULL, 0);
552 static void init_fdt(unsigned long phys_fdt)
558 EMSG("Device Tree missing");
560 * No need to panic as we're not using the DT in OP-TEE
561 * yet, we're only adding some nodes for normal world use.
562 * This makes the switch to using DT easier as we can boot
563 * a newer OP-TEE with older boot loaders. Once we start to
564 * initialize devices based on DT we'll likely panic
565 * instead of returning here.
570 if (!core_mmu_add_mapping(MEM_AREA_IO_NSEC, phys_fdt, CFG_DTB_MAX_SIZE))
571 panic("failed to map fdt");
573 fdt = phys_to_virt(phys_fdt, MEM_AREA_IO_NSEC);
577 ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
579 EMSG("Invalid Device Tree at 0x%" PRIxPA ": error %d",
584 if (add_optee_dt_node(fdt))
585 panic("Failed to add OP-TEE Device Tree node");
587 if (add_optee_res_mem_dt_node(fdt))
588 panic("Failed to add OP-TEE reserved memory DT node");
592 EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
598 static void init_fdt(unsigned long phys_fdt __unused)
603 static void init_primary_helper(unsigned long pageable_part,
604 unsigned long nsec_entry, unsigned long fdt)
607 * Mask asynchronous exceptions before switch to the thread vector
608 * as the thread handler requires those to be masked while
609 * executing with the temporary stack. The thread subsystem also
610 * asserts that IRQ is blocked when using most if its functions.
612 thread_set_exceptions(THREAD_EXCP_ALL);
615 init_runtime(pageable_part);
617 IMSG("Initializing (%s)\n", core_v_str);
619 thread_init_primary(generic_boot_get_handlers());
620 thread_init_per_cpu();
621 init_sec_mon(nsec_entry);
626 if (init_teecore() != TEE_SUCCESS)
628 DMSG("Primary CPU switching to normal world boot\n");
631 static void init_secondary_helper(unsigned long nsec_entry)
634 * Mask asynchronous exceptions before switch to the thread vector
635 * as the thread handler requires those to be masked while
636 * executing with the temporary stack. The thread subsystem also
637 * asserts that IRQ is blocked when using most if its functions.
639 thread_set_exceptions(THREAD_EXCP_ALL);
641 thread_init_per_cpu();
642 init_sec_mon(nsec_entry);
643 main_secondary_init_gic();
647 DMSG("Secondary CPU Switching to normal world boot\n");
#if defined(CFG_WITH_ARM_TRUSTED_FW)
/*
 * Entry from ARM Trusted Firmware: no secure monitor here (PADDR_INVALID),
 * returns the thread vector table for ATF to dispatch into.
 */
struct thread_vector_table *
generic_boot_init_primary(unsigned long pageable_part, unsigned long u __unused,
			  unsigned long fdt)
{
	init_primary_helper(pageable_part, PADDR_INVALID, fdt);
	return &thread_vector_table;
}

/* PSCI CPU_ON handler for secondary cores when booted via ATF */
unsigned long generic_boot_cpu_on_handler(unsigned long a0 __maybe_unused,
					  unsigned long a1 __unused)
{
	DMSG("cpu %zu: a0 0x%lx", get_core_pos(), a0);
	init_secondary_helper(PADDR_INVALID);
	return 0;
}
#else
/* Entry when OP-TEE provides its own secure monitor */
void generic_boot_init_primary(unsigned long pageable_part,
			       unsigned long nsec_entry, unsigned long fdt)
{
	init_primary_helper(pageable_part, nsec_entry, fdt);
}

void generic_boot_init_secondary(unsigned long nsec_entry)
{
	init_secondary_helper(nsec_entry);
}
#endif
679 #if defined(CFG_BOOT_SECONDARY_REQUEST)
680 int generic_boot_core_release(size_t core_idx, paddr_t entry)
682 if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
685 ns_entry_addrs[core_idx] = entry;
687 spin_table[core_idx] = 1;
695 * spin until secondary boot request, then returns with
696 * the secondary core entry address.
698 paddr_t generic_boot_core_hpen(void)
700 #ifdef CFG_PSCI_ARM32
701 return ns_entry_addrs[get_core_pos()];
705 } while (!spin_table[get_core_pos()]);
707 return ns_entry_addrs[get_core_pos()];