/*
 * Copyright (c) 2014, Linaro Limited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <platform_config.h>

#include <asm.S>
#include <arm.h>
#include <arm32_macros.S>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed_macros.h>
#include <sm/teesmc_opteed.h>
#include <kernel/unwind.h>
#include <kernel/asan.h>
#ifdef CFG_BOOT_SYNC_CPU
.equ SEM_CPU_READY, 1
#endif

#ifdef CFG_PL310
.section .rodata
panic_boot_file:
	.asciz __FILE__
/*
 * void __assert_flat_mapped_range(uint32_t vaddr, uint32_t line)
 */
LOCAL_FUNC __assert_flat_mapped_range , :
	/*
	 * This must be compliant with the generic panic routine:
	 * __do_panic(__FILE__, __LINE__, __func__, str)
	 */
	ldr	r0, =panic_boot_file
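	/*
	 * A minimal sketch of the remaining argument setup, following
	 * the __do_panic() prototype above and assuming the (elided)
	 * prologue stashed the line argument in r5:
	 */
	mov	r1, r5		/* __LINE__ */
	mov	r2, #0		/* no function name */
	mov	r3, #0		/* no message string */
	bl	__do_panic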
	b	.		/* should NOT return */
END_FUNC __assert_flat_mapped_range
	/* Panic if the MMU is enabled and vaddr != paddr (clobbers lr) */
	.macro assert_flat_mapped_range va, line
		ldr	r0, =(\va)
		ldr	r1, =\line
		bl	__assert_flat_mapped_range
	.endm
#endif /* CFG_PL310 */

	b	.	/* Prefetch abort */
	/* Mark this CPU as ready for the boot synchronization */
	.macro cpu_is_ready
#ifdef CFG_BOOT_SYNC_CPU
	bl	__get_core_pos
	lsl	r0, r0, #2	/* byte offset into sem_cpu_sync[] */
	ldr	r1, =sem_cpu_sync
	ldr	r2, =SEM_CPU_READY
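	/*
	 * A sketch of the publish step, assuming one 32-bit semaphore
	 * per core in sem_cpu_sync[]: store the flag, then wake any
	 * core sleeping in wfe.
	 */
	str	r2, [r1, r0]	/* sem_cpu_sync[core] = SEM_CPU_READY */
	dsb			/* order the store before the event */
	sev
#endif
	.endm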
	/* Wait until the primary core reports it is ready */
	.macro wait_primary
#ifdef CFG_BOOT_SYNC_CPU
	ldr	r0, =sem_cpu_sync
	mov	r2, #SEM_CPU_READY
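	/*
	 * A sketch of the spin loop, assuming the primary core owns the
	 * first sem_cpu_sync slot: sleep in wfe until that slot reads
	 * back SEM_CPU_READY.
	 */
1:
	ldr	r1, [r0]
	cmp	r1, r2
	wfene			/* not ready yet: wait for an event */
	bne	1b
#endif
	.endm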
	/* Wait until every secondary core reports it is ready */
	.macro wait_secondary
#ifdef CFG_BOOT_SYNC_CPU
	ldr	r0, =sem_cpu_sync
	mov	r3, #CFG_TEE_CORE_NB_CORE
	mov	r2, #SEM_CPU_READY
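	/*
	 * A sketch of the wait loop: for each of the
	 * CFG_TEE_CORE_NB_CORE semaphores, spin (sleeping in wfe)
	 * until the owning core has stored SEM_CPU_READY.
	 */
1:
	ldr	r1, [r0]
	cmp	r1, r2
	wfene			/* this core not ready yet: sleep */
	bne	1b
	add	r0, r0, #4	/* next core's semaphore */
	subs	r3, r3, #1
	bne	1b
#endif
	.endm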
	/*
	 * Save boot arguments:
	 * entry r0, saved r4: pageable part address
	 * entry r1, saved r7: (ARMv7 standard bootarg #1)
	 * entry r2, saved r6: device tree address (ARMv7 standard bootarg #2)
	 * entry lr, saved r5: non-secure entry address (ARMv7 bootarg #0)
	 */
	.macro bootargs_entry
#if defined(CFG_NS_ENTRY_ADDR)
	ldr	r5, =CFG_NS_ENTRY_ADDR
#else
	mov	r5, lr		/* bootarg #0: non-secure entry address */
#endif
#if defined(CFG_PAGEABLE_ADDR)
	ldr	r4, =CFG_PAGEABLE_ADDR
#else
	mov	r4, r0		/* pageable part address */
#endif
#if defined(CFG_DT_ADDR)
	ldr	r6, =CFG_DT_ADDR
#else
	mov	r6, r2		/* bootarg #2: device tree address */
#endif
	mov	r7, r1		/* bootarg #1 */
	.endm
	/* Enable alignment checks and disable data and instruction cache. */
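	/*
	 * A sketch of the SCTLR update described above, assuming the
	 * read_sctlr/write_sctlr helpers from arm32_macros.S and the
	 * SCTLR_A/SCTLR_C/SCTLR_I bit masks from arm.h:
	 */
	read_sctlr r0
	orr	r0, r0, #SCTLR_A	/* enable alignment checks */
	bic	r0, r0, #SCTLR_C	/* disable data cache */
	bic	r0, r0, #SCTLR_I	/* disable instruction cache */
	write_sctlr r0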
	/* Early ARM secure MP specific configuration */
	bl	plat_cpu_reset_early

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	/*
	 * Setup sp to point to the top of the tmp stack for the current CPU:
	 * sp is assigned stack_tmp + (cpu_id + 1) * stack_tmp_stride -
	 * stack_tmp_offset
	 */
	bl	__get_core_pos
	cmp	r0, #CFG_TEE_CORE_NB_CORE
	/* Unsupported CPU, park it before it breaks something */
	bge	unhandled_cpu
	add	r0, r0, #1		/* cpu_id + 1 */
	ldr	r2, =stack_tmp_stride
	ldr	r1, [r2]
	mul	r2, r0, r1		/* (cpu_id + 1) * stack_tmp_stride */
	ldr	r1, =stack_tmp
	add	r1, r1, r2
	ldr	r2, =stack_tmp_offset
	ldr	r2, [r2]
	sub	r1, r1, r2
	mov	sp, r1
	/*
	 * Cache maintenance during entry: handle outer cache.
	 * End address is exclusive: first byte not to be changed.
	 * Note however arm_clX_inv/cleanbyva operate on full cache lines.
	 *
	 * Use an ANSI #define to capture the source file line number for
	 * the PL310 assertion.
	 */
	.macro __inval_cache_vrange vbase, vend, line
#ifdef CFG_PL310
		assert_flat_mapped_range (\vbase), (\line)
		bl	pl310_base
		ldr	r1, =(\vbase)
		ldr	r2, =(\vend)
		bl	arm_cl2_invbypa
#endif
		ldr	r0, =(\vbase)
		ldr	r1, =(\vend)
		bl	arm_cl1_d_invbyva
	.endm
	.macro __flush_cache_vrange vbase, vend, line
#ifdef CFG_PL310
		assert_flat_mapped_range (\vbase), (\line)
		ldr	r0, =(\vbase)
		ldr	r1, =(\vend)
		bl	arm_cl1_d_cleanbyva
		bl	pl310_base
		ldr	r1, =(\vbase)
		ldr	r2, =(\vend)
		bl	arm_cl2_cleaninvbypa
#endif
		ldr	r0, =(\vbase)
		ldr	r1, =(\vend)
		bl	arm_cl1_d_cleaninvbyva
	.endm
#define inval_cache_vrange(vbase, vend) \
		__inval_cache_vrange (vbase), ((vend) - 1), __LINE__

#define flush_cache_vrange(vbase, vend) \
		__flush_cache_vrange (vbase), ((vend) - 1), __LINE__
#ifdef CFG_BOOT_SYNC_CPU
#define flush_cpu_semaphores \
		flush_cache_vrange(sem_cpu_sync, \
			(sem_cpu_sync + (CFG_TEE_CORE_NB_CORE << 2)))
#else
#define flush_cpu_semaphores
#endif
LOCAL_FUNC reset_primary , :

	/* Preserve r4-r7: bootargs */
#ifdef CFG_WITH_PAGER
	/*
	 * Move init code into correct location and move hashes to a
	 * temporary safe location until the heap is initialized.
	 *
	 * The binary is built as:
	 * [Pager code, rodata and data] : In correct location
	 * [Init code and rodata] : Should be copied to __text_init_start
	 * [Hashes] : Should be saved before initializing pager
	 */
	ldr	r0, =__text_init_start	/* dst */
	ldr	r1, =__data_end		/* src */
	ldr	r2, =__tmp_hashes_end	/* dst limit */
	/* Copy backwards (as memmove does) since the regions may overlap */
	sub	r2, r2, r0		/* len */
	add	r0, r0, r2		/* run dst from the end... */
	add	r1, r1, r2		/* ...and src likewise */
	ldr	r2, =__text_init_start
copy_init:
	ldmdb	r1!, {r3, r8-r12, sp}
	stmdb	r0!, {r3, r8-r12, sp}
	cmp	r0, r2
	bgt	copy_init
#endif /* CFG_WITH_PAGER */

#ifdef CFG_CORE_SANITIZE_KADDRESS
	/* First initialize the entire shadow area with no access */
	ldr	r0, =__asan_shadow_start	/* start */
	ldr	r1, =__asan_shadow_end		/* limit */
	mov	r2, #ASAN_DATA_RED_ZONE
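	/*
	 * A sketch of the fill itself, assuming a plain byte-store loop
	 * over [start, limit) (the full source may call a memset-style
	 * helper instead):
	 */
1:
	strb	r2, [r0]
	add	r0, r0, #1
	cmp	r0, r1
	blo	1b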

	/* Mark the entire stack area as OK */
	ldr	r2, =CFG_ASAN_SHADOW_OFFSET
	ldr	r0, =__nozi_stack_start	/* start */
	lsr	r0, r0, #ASAN_BLOCK_SHIFT
	add	r0, r0, r2		/* shadow address of start */
	ldr	r1, =__nozi_stack_end	/* limit */
	lsr	r1, r1, #ASAN_BLOCK_SHIFT
	add	r1, r1, r2		/* shadow address of limit */
	mov	r2, #0			/* 0: block fully accessible */
shadow_stack_access_ok:
	strb	r2, [r0]
	add	r0, r0, #1
	cmp	r0, r1
	bls	shadow_stack_access_ok
#endif /* CFG_CORE_SANITIZE_KADDRESS */

	/* Complete ARM secure MP common configuration */
	bl	plat_cpu_reset_late
	/*
	 * Invalidate dcache for all memory used during initialization to
	 * avoid nasty surprises when the cache is turned on. We must not
	 * invalidate memory not used by OP-TEE since we may invalidate
	 * entries used by, for instance, ARM Trusted Firmware.
	 */
#ifdef CFG_WITH_PAGER
	inval_cache_vrange(__text_start, __tmp_hashes_end)
#else
	inval_cache_vrange(__text_start, __end)
#endif

	/* Enable PL310 if not yet enabled */
	bl	core_init_mmu_map
	bl	core_init_mmu_regs
	bl	cpu_mmu_enable
	bl	cpu_mmu_enable_icache
	bl	cpu_mmu_enable_dcache

	mov	r0, r4		/* pageable part address */
	mov	r1, r5		/* ns-entry address */
	mov	r2, r6		/* DT address */
	bl	generic_boot_init_primary
	mov	r4, r0		/* save the returned vector address */
	/*
	 * In case we've touched memory that secondary CPUs will use before
	 * they have turned on their D-cache, clean and invalidate the
	 * D-cache before exiting to normal world.
	 */
#ifdef CFG_WITH_PAGER
	flush_cache_vrange(__text_start, __init_end)
#else
	flush_cache_vrange(__text_start, __end)
#endif

	/* Release secondary boot cores and sync with them */
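	/*
	 * A sketch of that release/sync step, reusing the
	 * synchronization macros defined earlier (the exact sequence in
	 * the full source may differ):
	 */
	cpu_is_ready
	flush_cpu_semaphores
	wait_secondary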

#ifdef CFG_PL310_LOCKED
	/* Lock and invalidate all lines: the PL310 then behaves as if disabled */
	bl	arm_cl2_lockallways
	bl	arm_cl2_cleaninvbyway
#endif
	/*
	 * Clear current thread id now to allow the thread to be reused on
	 * next entry. Matches the thread_init_boot_thread() in
	 * generic_boot.c.
	 */
	bl	thread_clr_boot_thread

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	/* Pass the vector address returned from generic_boot_init_primary */
	mov	r1, r4
#else
	/* Relay standard bootargs #1 and #2 to the non-secure entry */
	mov	r3, r6		/* std bootarg #2 for register R2 */
	mov	r2, r7		/* std bootarg #1 for register R1 */
#endif /* CFG_WITH_ARM_TRUSTED_FW */
	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC reset_primary

LOCAL_FUNC unhandled_cpu , :
	wfi
	b	unhandled_cpu
END_FUNC unhandled_cpu

#if defined(CFG_WITH_ARM_TRUSTED_FW)
FUNC cpu_on_handler , :
	bl	core_init_mmu_regs
	bl	cpu_mmu_enable
	bl	cpu_mmu_enable_icache
	bl	cpu_mmu_enable_dcache
	bl	generic_boot_cpu_on_handler
END_FUNC cpu_on_handler

#else /* defined(CFG_WITH_ARM_TRUSTED_FW) */

LOCAL_FUNC reset_secondary , :
	bl	plat_cpu_reset_late
#if defined(CFG_BOOT_SECONDARY_REQUEST)
	/* If L1 was not invalidated before, do it here */
	bl	arm_cl1_d_invbysetway
#endif
	bl	core_init_mmu_regs
	bl	cpu_mmu_enable
	bl	cpu_mmu_enable_icache
	bl	cpu_mmu_enable_dcache
#if defined(CFG_BOOT_SECONDARY_REQUEST)
	/* generic_boot_core_hpen() returns the ns entry point in r0 */
	bl	generic_boot_core_hpen
#else
	mov	r0, r5		/* ns-entry address */
#endif
	bl	generic_boot_init_secondary
	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC reset_secondary
#endif /* defined(CFG_WITH_ARM_TRUSTED_FW) */