core/arch/arm/kernel/generic_boot.c
/*
 * Copyright (c) 2015, Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm.h>
#include <assert.h>
#include <compiler.h>
#include <inttypes.h>
#include <keep.h>
#include <kernel/generic_boot.h>
#include <kernel/thread.h>
#include <kernel/panic.h>
#include <kernel/misc.h>
#include <kernel/asan.h>
#include <malloc.h>
#include <mm/core_mmu.h>
#include <mm/core_memprot.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu.h>
#include <mm/tee_pager.h>
#include <sm/tee_mon.h>
#include <trace.h>
#include <tee/tee_cryp_provider.h>
#include <utee_defines.h>
#include <util.h>
#include <stdio.h>

#include <platform_config.h>

#if !defined(CFG_WITH_ARM_TRUSTED_FW)
#include <sm/sm.h>
#endif

#if defined(CFG_WITH_VFP)
#include <kernel/vfp.h>
#endif

#if defined(CFG_DT)
#include <libfdt.h>
#endif

/*
 * In this file we're using unsigned long to represent physical pointers as
 * they are received in a single register when OP-TEE is initially entered.
 * This limits 32-bit systems to using only the lower 32 bits of a physical
 * address for initial parameters.
 *
 * 64-bit systems on the other hand can use full 64-bit physical pointers.
 */
#define PADDR_INVALID           ULONG_MAX

#if defined(CFG_BOOT_SECONDARY_REQUEST)
paddr_t ns_entry_addrs[CFG_TEE_CORE_NB_CORE] __early_bss;
static uint32_t spin_table[CFG_TEE_CORE_NB_CORE] __early_bss;
#endif

#ifdef CFG_BOOT_SYNC_CPU
/*
 * Array used during boot to synchronize the CPUs:
 * when 0, the CPU has not started;
 * when 1, it has started.
 */
uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE] __early_bss;
#endif
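
/*
 * How the sync array is presumably used (an assumption; the wait loops
 * live in the boot entry assembly, not in this file): each CPU raises
 * its own sem_cpu_sync slot once it has started, so the boot code can
 * stagger CPU bring-up by spinning on the previous CPU's slot.
 */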

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void plat_cpu_reset_late(void)
{
}
KEEP_PAGER(plat_cpu_reset_late);

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void plat_cpu_reset_early(void)
{
}
KEEP_PAGER(plat_cpu_reset_early);

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void main_init_gic(void)
{
}

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void main_secondary_init_gic(void)
{
}

#if defined(CFG_WITH_ARM_TRUSTED_FW)
void init_sec_mon(unsigned long nsec_entry __maybe_unused)
{
        assert(nsec_entry == PADDR_INVALID);
        /* Do nothing as we don't have a secure monitor */
}
#else
/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void init_sec_mon(unsigned long nsec_entry)
{
        struct sm_nsec_ctx *nsec_ctx;

        assert(nsec_entry != PADDR_INVALID);

        /* Initialize secure monitor */
        nsec_ctx = sm_get_nsec_ctx();
        nsec_ctx->mon_lr = nsec_entry;
        nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
}
#endif
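
/*
 * Background (a reading of the code above, not an authoritative
 * description): mon_lr/mon_spsr form the saved normal-world return
 * state, so on the first return from the secure monitor the normal
 * world resumes at nsec_entry in SVC mode with IRQs masked (CPSR_I).
 * With CFG_WITH_ARM_TRUSTED_FW the TF-A firmware owns monitor mode
 * instead, which is why nsec_entry must be PADDR_INVALID there.
 */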

#if defined(CFG_WITH_ARM_TRUSTED_FW)
static void init_vfp_nsec(void)
{
}
#else
static void init_vfp_nsec(void)
{
        /* Normal world can use CP10 and CP11 (SIMD/VFP) */
        write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
}
#endif

#if defined(CFG_WITH_VFP)

#ifdef ARM32
static void init_vfp_sec(void)
{
        uint32_t cpacr = read_cpacr();

        /*
         * Enable Advanced SIMD functionality.
         * Enable use of D16-D31 of the Floating-point Extension register
         * file.
         */
        cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
        /*
         * Enable usage of CP10 and CP11 (SIMD/VFP) (both kernel and user
         * mode).
         */
        cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
        cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
        write_cpacr(cpacr);
}
#endif /* ARM32 */

#ifdef ARM64
static void init_vfp_sec(void)
{
        /* Not using VFP until thread_kernel_enable_vfp() */
        vfp_disable();
}
#endif /* ARM64 */

#else /* CFG_WITH_VFP */

static void init_vfp_sec(void)
{
        /* Not using VFP */
}
#endif

#ifdef CFG_WITH_PAGER

static size_t get_block_size(void)
{
        struct core_mmu_table_info tbl_info;
        unsigned l;

        if (!core_mmu_find_table(CFG_TEE_RAM_START, UINT_MAX, &tbl_info))
                panic("can't find mmu tables");

        l = tbl_info.level - 1;
        if (!core_mmu_find_table(CFG_TEE_RAM_START, l, &tbl_info))
                panic("can't find mmu table upper level");

        return 1 << tbl_info.shift;
}
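
/*
 * Illustration (typical values, not guaranteed for every platform):
 * with 4 KiB pages the deepest table maps with shift 12, so the level
 * above it maps blocks of e.g. 2 MiB (shift 21, LPAE long descriptors)
 * or 1 MiB (shift 20, ARMv7 short-descriptor sections). get_block_size()
 * returns that upper-level block size.
 */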

static void init_runtime(unsigned long pageable_part)
{
        size_t n;
        size_t init_size = (size_t)__init_size;
        size_t pageable_size = __pageable_end - __pageable_start;
        size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
                           TEE_SHA256_HASH_SIZE;
        tee_mm_entry_t *mm;
        uint8_t *paged_store;
        uint8_t *hashes;
        size_t block_size;

        assert(pageable_size % SMALL_PAGE_SIZE == 0);
        assert(hash_size == (size_t)__tmp_hashes_size);

        /*
         * Zero BSS area. Note that globals that would normally go into
         * BSS but are used before this point have to be put into .nozi.*
         * to avoid getting overwritten.
         */
        memset(__bss_start, 0, __bss_end - __bss_start);

        /*
         * This needs to be initialized early to support address lookup
         * in MEM_AREA_TEE_RAM
         */
        if (!core_mmu_find_table(CFG_TEE_RAM_START, UINT_MAX,
                                 &tee_pager_tbl_info))
                panic("can't find mmu tables");

        if (tee_pager_tbl_info.shift != SMALL_PAGE_SHIFT)
                panic("Unsupported page size in translation table");

        thread_init_boot_thread();

        malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
        malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);

        hashes = malloc(hash_size);
        IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
        assert(hashes);
        memcpy(hashes, __tmp_hashes_start, hash_size);

        /*
         * Need tee_mm_sec_ddr initialized to be able to allocate secure
         * DDR below.
         */
        teecore_init_ta_ram();

        mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
        assert(mm);
        paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM);
        /* Copy init part into pageable area */
        memcpy(paged_store, __init_start, init_size);
        /* Copy pageable part after init part into pageable area */
        memcpy(paged_store + init_size,
               phys_to_virt(pageable_part,
                            core_mmu_get_type_by_pa(pageable_part)),
               __pageable_part_end - __pageable_part_start);

        /* Check that the hashes of what's in the pageable area are OK */
        DMSG("Checking hashes of pageable area");
        for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
                const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
                const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
                TEE_Result res;

                DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
                res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
                if (res != TEE_SUCCESS) {
                        EMSG("Hash failed for page %zu at %p: res 0x%x",
                             n, page, res);
                        panic();
                }
        }

        /*
         * Copy what's not initialized in the last init page. Needed
         * because we're not going to fault in the init pages again. We
         * can't fault in pages until we've switched to the new vector by
         * calling thread_init_handlers() below.
         */
        if (init_size % SMALL_PAGE_SIZE) {
                uint8_t *p;

                memcpy(__init_start + init_size, paged_store + init_size,
                       SMALL_PAGE_SIZE - (init_size % SMALL_PAGE_SIZE));

                p = (uint8_t *)(((vaddr_t)__init_start + init_size) &
                                ~SMALL_PAGE_MASK);

                cache_maintenance_l1(DCACHE_AREA_CLEAN, p, SMALL_PAGE_SIZE);
                cache_maintenance_l1(ICACHE_AREA_INVALIDATE, p,
                                     SMALL_PAGE_SIZE);
        }

        /*
         * Initialize the virtual memory pool used for main_mmu_l2_ttb which
         * is supplied to tee_pager_init() below.
         */
        block_size = get_block_size();
        if (!tee_mm_init(&tee_mm_vcore,
                        ROUNDDOWN(CFG_TEE_RAM_START, block_size),
                        ROUNDUP(CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE,
                                block_size),
                        SMALL_PAGE_SHIFT, 0))
                panic("tee_mm_vcore init failed");

        /*
         * Assign the alias area for the pager at the end of the small
         * page block that the rest of the binary is loaded into. We're
         * taking more than needed, but we're guaranteed to not need more
         * than the physical amount of TZSRAM.
         */
        mm = tee_mm_alloc2(&tee_mm_vcore,
                (vaddr_t)tee_mm_vcore.hi - TZSRAM_SIZE, TZSRAM_SIZE);
        assert(mm);
        tee_pager_init(mm);

        /*
         * Claim virtual memory which isn't paged. Note that there might
         * be a gap between tee_mm_vcore.lo and TEE_RAM_START which is
         * also claimed to avoid later allocations getting that memory.
         * Linear memory (flat map core memory) ends there.
         */
        mm = tee_mm_alloc2(&tee_mm_vcore, tee_mm_vcore.lo,
                        (vaddr_t)(__pageable_start - tee_mm_vcore.lo));
        assert(mm);

        /*
         * Allocate virtual memory for the pageable area and let the pager
         * take charge of all the pages already assigned to that memory.
         */
        mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
                           pageable_size);
        assert(mm);
        if (!tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
                                     TEE_MATTR_PRX, paged_store, hashes))
                panic("failed to add pageable to vcore");

        tee_pager_add_pages((vaddr_t)__pageable_start,
                ROUNDUP(init_size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE, false);
        tee_pager_add_pages((vaddr_t)__pageable_start +
                                ROUNDUP(init_size, SMALL_PAGE_SIZE),
                        (pageable_size - ROUNDUP(init_size, SMALL_PAGE_SIZE)) /
                                SMALL_PAGE_SIZE, true);
}
#else

#ifdef CFG_CORE_SANITIZE_KADDRESS
static void init_run_constructors(void)
{
        vaddr_t *ctor;

        for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
                ((void (*)(void))(*ctor))();
}

static void init_asan(void)
{
        /*
         * CFG_ASAN_SHADOW_OFFSET is also supplied as
         * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
         * Since the values needed to calculate CFG_ASAN_SHADOW_OFFSET
         * aren't available to make, we have to calculate it in advance
         * and hard code it into the platform conf.mk. Here, where we have
         * all the needed values, we double check that the compiler was
         * supplied the correct value.
         */

#define __ASAN_SHADOW_START \
        ROUNDUP(CFG_TEE_RAM_START + (CFG_TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
        assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
#define __CFG_ASAN_SHADOW_OFFSET \
        (__ASAN_SHADOW_START - (CFG_TEE_RAM_START / 8))
        COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
#undef __ASAN_SHADOW_START
#undef __CFG_ASAN_SHADOW_OFFSET
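
        /*
         * Where these formulas come from (a sketch of the standard ASAN
         * address mapping, assumed here): shadow(addr) = (addr >> 3) +
         * offset, i.e. one shadow byte tracks eight application bytes.
         * Splitting CFG_TEE_RAM_VA_SIZE 8/9 application : 1/9 shadow
         * makes the shadow area exactly large enough for the rest of
         * TEE RAM, and offset = shadow_start - (CFG_TEE_RAM_START / 8)
         * places the shadow of the first application byte at
         * shadow_start.
         */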

        /*
         * Assign area covered by the shadow area, everything from start
         * up to the beginning of the shadow area.
         */
        asan_set_shadowed((void *)CFG_TEE_LOAD_ADDR, &__asan_shadow_start);

        /*
         * Add access to areas that aren't opened automatically by a
         * constructor.
         */
        asan_tag_access(&__initcall_start, &__initcall_end);
        asan_tag_access(&__ctor_list, &__ctor_end);
        asan_tag_access(__rodata_start, __rodata_end);
        asan_tag_access(__early_bss_start, __early_bss_end);
        asan_tag_access(__nozi_start, __nozi_end);

        init_run_constructors();

        /* Everything is tagged correctly, let's start address sanitizing. */
        asan_start();
}
#else /*CFG_CORE_SANITIZE_KADDRESS*/
static void init_asan(void)
{
}
#endif /*CFG_CORE_SANITIZE_KADDRESS*/

static void init_runtime(unsigned long pageable_part __unused)
{
        /*
         * Zero BSS area. Note that globals that would normally go into
         * BSS but are used before this point have to be put into .nozi.*
         * to avoid getting overwritten.
         */
        memset(__bss_start, 0, __bss_end - __bss_start);

        thread_init_boot_thread();

        init_asan();
        malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);

        /*
         * Initialized at this stage in the pager version of this function
         * above.
         */
        teecore_init_ta_ram();
}
#endif

#ifdef CFG_DT
static int add_optee_dt_node(void *fdt)
{
        int offs;
        int ret;

        if (fdt_path_offset(fdt, "/firmware/optee") >= 0) {
                IMSG("OP-TEE Device Tree node already exists!\n");
                return 0;
        }

        offs = fdt_path_offset(fdt, "/firmware");
        if (offs < 0) {
                offs = fdt_path_offset(fdt, "/");
                if (offs < 0)
                        return -1;
                offs = fdt_add_subnode(fdt, offs, "firmware");
                if (offs < 0)
                        return -1;
        }

        offs = fdt_add_subnode(fdt, offs, "optee");
        if (offs < 0)
                return -1;

        ret = fdt_setprop_string(fdt, offs, "compatible", "linaro,optee-tz");
        if (ret < 0)
                return -1;
        ret = fdt_setprop_string(fdt, offs, "method", "smc");
        if (ret < 0)
                return -1;
        return 0;
}
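
/*
 * The node produced above corresponds to this device tree fragment,
 * which is what a normal world OP-TEE driver binds against:
 *
 *	firmware {
 *		optee {
 *			compatible = "linaro,optee-tz";
 *			method = "smc";
 *		};
 *	};
 */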

static int get_dt_cell_size(void *fdt, int offs, const char *cell_name,
                            uint32_t *cell_size)
{
        int len;
        const uint32_t *cell = fdt_getprop(fdt, offs, cell_name, &len);

        if (len != sizeof(*cell))
                return -1;
        *cell_size = fdt32_to_cpu(*cell);
        if (*cell_size != 1 && *cell_size != 2)
                return -1;
        return 0;
}

static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
{
        if (cell_size == 1) {
                uint32_t v = cpu_to_fdt32((uint32_t)val);

                memcpy(data, &v, sizeof(v));
        } else {
                uint64_t v = cpu_to_fdt64(val);

                memcpy(data, &v, sizeof(v));
        }
}
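
/*
 * Device tree property values are arrays of big-endian 32-bit cells,
 * and #address-cells/#size-cells say how many cells an address or size
 * occupies (1 for 32-bit, 2 for 64-bit). So, with an illustrative
 * address, set_dt_val(data, 2, 0x42000000) stores the big-endian
 * 64-bit encoding <0x0 0x42000000> at data.
 */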

static int add_optee_res_mem_dt_node(void *fdt)
{
        int offs;
        int ret;
        uint32_t addr_size = 2;
        uint32_t len_size = 2;
        vaddr_t shm_va_start;
        vaddr_t shm_va_end;
        paddr_t shm_pa;
        char subnode_name[80];

        offs = fdt_path_offset(fdt, "/reserved-memory");
        if (offs >= 0) {
                ret = get_dt_cell_size(fdt, offs, "#address-cells", &addr_size);
                if (ret < 0)
                        return -1;
                ret = get_dt_cell_size(fdt, offs, "#size-cells", &len_size);
                if (ret < 0)
                        return -1;
        } else {
                offs = fdt_path_offset(fdt, "/");
                if (offs < 0)
                        return -1;
                offs = fdt_add_subnode(fdt, offs, "reserved-memory");
                if (offs < 0)
                        return -1;
                ret = fdt_setprop_cell(fdt, offs, "#address-cells", addr_size);
                if (ret < 0)
                        return -1;
                ret = fdt_setprop_cell(fdt, offs, "#size-cells", len_size);
                if (ret < 0)
                        return -1;
                ret = fdt_setprop(fdt, offs, "ranges", NULL, 0);
                if (ret < 0)
                        return -1;
        }

        core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_va_start, &shm_va_end);
        shm_pa = virt_to_phys((void *)shm_va_start);
        snprintf(subnode_name, sizeof(subnode_name),
                 "optee@0x%" PRIxPA, shm_pa);
        offs = fdt_add_subnode(fdt, offs, subnode_name);
        if (offs >= 0) {
                uint32_t data[addr_size + len_size];

                set_dt_val(data, addr_size, shm_pa);
                set_dt_val(data + addr_size, len_size,
                           shm_va_end - shm_va_start);
                ret = fdt_setprop(fdt, offs, "reg", data, sizeof(data));
                if (ret < 0)
                        return -1;
                ret = fdt_setprop(fdt, offs, "no-map", NULL, 0);
                if (ret < 0)
                        return -1;
        } else {
                return -1;
        }
        return 0;
}
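
/*
 * The resulting fragment looks roughly like this (the address and size
 * are illustrative; the real values come from MEM_AREA_NSEC_SHM):
 *
 *	reserved-memory {
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		ranges;
 *
 *		optee@0x42000000 {
 *			reg = <0x0 0x42000000 0x0 0x200000>;
 *			no-map;
 *		};
 *	};
 *
 * "no-map" tells the normal world OS not to map this shared memory
 * region as part of ordinary system memory.
 */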

static void init_fdt(unsigned long phys_fdt)
{
        void *fdt;
        int ret;

        if (!phys_fdt) {
                EMSG("Device Tree missing");
                /*
                 * No need to panic as we're not using the DT in OP-TEE
                 * yet, we're only adding some nodes for normal world use.
                 * This makes the switch to using DT easier as we can boot
                 * a newer OP-TEE with older boot loaders. Once we start to
                 * initialize devices based on DT we'll likely panic
                 * instead of returning here.
                 */
                return;
        }

        if (!core_mmu_add_mapping(MEM_AREA_IO_NSEC, phys_fdt, CFG_DTB_MAX_SIZE))
                panic("failed to map fdt");

        fdt = phys_to_virt(phys_fdt, MEM_AREA_IO_NSEC);
        if (!fdt)
                panic();

        ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
        if (ret < 0) {
                EMSG("Invalid Device Tree at 0x%" PRIxPA ": error %d",
                     phys_fdt, ret);
                panic();
        }

        if (add_optee_dt_node(fdt))
                panic("Failed to add OP-TEE Device Tree node");

        if (add_optee_res_mem_dt_node(fdt))
                panic("Failed to add OP-TEE reserved memory DT node");

        ret = fdt_pack(fdt);
        if (ret < 0) {
                EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
                     phys_fdt, ret);
                panic();
        }
}
#else
static void init_fdt(unsigned long phys_fdt __unused)
{
}
#endif /*!CFG_DT*/

static void init_primary_helper(unsigned long pageable_part,
                                unsigned long nsec_entry, unsigned long fdt)
{
        /*
         * Mask asynchronous exceptions before switching to the thread
         * vector as the thread handler requires those to be masked while
         * executing with the temporary stack. The thread subsystem also
         * asserts that IRQ is blocked when using most of its functions.
         */
        thread_set_exceptions(THREAD_EXCP_ALL);
        init_vfp_sec();

        init_runtime(pageable_part);

        IMSG("Initializing (%s)\n", core_v_str);

        thread_init_primary(generic_boot_get_handlers());
        thread_init_per_cpu();
        init_sec_mon(nsec_entry);
        init_fdt(fdt);
        main_init_gic();
        init_vfp_nsec();

        if (init_teecore() != TEE_SUCCESS)
                panic();
        DMSG("Primary CPU switching to normal world boot\n");
}

static void init_secondary_helper(unsigned long nsec_entry)
{
        /*
         * Mask asynchronous exceptions before switching to the thread
         * vector as the thread handler requires those to be masked while
         * executing with the temporary stack. The thread subsystem also
         * asserts that IRQ is blocked when using most of its functions.
         */
        thread_set_exceptions(THREAD_EXCP_ALL);

        thread_init_per_cpu();
        init_sec_mon(nsec_entry);
        main_secondary_init_gic();
        init_vfp_sec();
        init_vfp_nsec();

        DMSG("Secondary CPU switching to normal world boot\n");
}

#if defined(CFG_WITH_ARM_TRUSTED_FW)
struct thread_vector_table *
generic_boot_init_primary(unsigned long pageable_part, unsigned long u __unused,
                          unsigned long fdt)
{
        init_primary_helper(pageable_part, PADDR_INVALID, fdt);
        return &thread_vector_table;
}

unsigned long generic_boot_cpu_on_handler(unsigned long a0 __maybe_unused,
                                          unsigned long a1 __unused)
{
        DMSG("cpu %zu: a0 0x%lx", get_core_pos(), a0);
        init_secondary_helper(PADDR_INVALID);
        return 0;
}
#else
void generic_boot_init_primary(unsigned long pageable_part,
                               unsigned long nsec_entry, unsigned long fdt)
{
        init_primary_helper(pageable_part, nsec_entry, fdt);
}

void generic_boot_init_secondary(unsigned long nsec_entry)
{
        init_secondary_helper(nsec_entry);
}
#endif

#if defined(CFG_BOOT_SECONDARY_REQUEST)
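/*
 * Release protocol (as implemented below): the caller publishes the
 * normal world entry address, a dmb() makes it visible before the
 * spin_table flag is raised, then dsb() + sev() wake any secondary
 * parked in wfe(). generic_boot_core_hpen() pairs with this by reading
 * the flag, issuing a dmb(), and only then reading the entry address.
 */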
int generic_boot_core_release(size_t core_idx, paddr_t entry)
{
        if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
                return -1;

        ns_entry_addrs[core_idx] = entry;
        dmb();
        spin_table[core_idx] = 1;
        dsb();
        sev();

        return 0;
}

/*
 * Spin until a secondary boot request arrives, then return the
 * secondary core's entry address.
 */
paddr_t generic_boot_core_hpen(void)
{
#ifdef CFG_PSCI_ARM32
        return ns_entry_addrs[get_core_pos()];
#else
        do {
                wfe();
        } while (!spin_table[get_core_pos()]);
        dmb();
        return ns_entry_addrs[get_core_pos()];
#endif
}
#endif