/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * This core mmu supports static section mapping (1MByte) and finer mapping
 * with 4KByte pages.
 * It should also allow core to map/unmap (and va/pa) at run-time.
 */
#include <arm.h>
#include <assert.h>
#include <kernel/generic_boot.h>
#include <kernel/panic.h>
#include <kernel/tee_l2cc_mutex.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/tz_ssvce.h>
#include <kernel/tz_ssvce_pl310.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/pgt_cache.h>
#include <mm/tee_mmu.h>
#include <mm/tee_pager.h>
#include <platform_config.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <util.h>

#include "core_mmu_private.h"
#define MAX_MMAP_REGIONS	10
#define RES_VASPACE_SIZE	(CORE_MMU_PGDIR_SIZE * 10)
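/*
 * MEM_AREA_RES_VASPACE is a range of virtual addresses without any physical
 * backing at boot: it is reserved so that core_mmu_add_mapping() further
 * down can map additional I/O or memory after the static tables are built.
 */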
/*
 * These variables are initialized before .bss is cleared. To avoid
 * resetting them when .bss is cleared we're storing them in .data instead,
 * even if they initially are zero.
 */

/* Default NSec shared memory allocated from NSec world */
unsigned long default_nsec_shm_size __early_bss;
unsigned long default_nsec_shm_paddr __early_bss;

static struct tee_mmap_region
	static_memory_map[MAX_MMAP_REGIONS + 1] __early_bss;
static bool mem_map_inited __early_bss;

static struct tee_mmap_region *map_tee_ram __early_bss;
static struct tee_mmap_region *map_ta_ram __early_bss;
static struct tee_mmap_region *map_nsec_shm __early_bss;
/* Define the platform's memory layout. */
struct memaccess_area {
	paddr_t paddr;
	size_t size;
};

#define MEMACCESS_AREA(a, s) { .paddr = a, .size = s }

static struct memaccess_area ddr[] = {
	MEMACCESS_AREA(DRAM0_BASE, DRAM0_SIZE),
#ifdef DRAM1_BASE
	MEMACCESS_AREA(DRAM1_BASE, DRAM1_SIZE),
#endif
};

static struct memaccess_area secure_only[] = {
#ifdef TZSRAM_BASE
	MEMACCESS_AREA(TZSRAM_BASE, TZSRAM_SIZE),
#endif
	MEMACCESS_AREA(TZDRAM_BASE, TZDRAM_SIZE),
};

static struct memaccess_area nsec_shared[] = {
	MEMACCESS_AREA(CFG_SHMEM_START, CFG_SHMEM_SIZE),
};
#ifdef CFG_TEE_SDP_MEM_BASE
register_sdp_mem(CFG_TEE_SDP_MEM_BASE, CFG_TEE_SDP_MEM_SIZE);
#endif
register_phys_mem(MEM_AREA_TEE_RAM, CFG_TEE_RAM_START, CFG_TEE_RAM_PH_SIZE);
register_phys_mem(MEM_AREA_TA_RAM, CFG_TA_RAM_START, CFG_TA_RAM_SIZE);
register_phys_mem(MEM_AREA_NSEC_SHM, CFG_SHMEM_START, CFG_SHMEM_SIZE);
#ifdef DEVICE0_PA_BASE
register_phys_mem(DEVICE0_TYPE, DEVICE0_PA_BASE, DEVICE0_SIZE);
#endif
#ifdef DEVICE1_PA_BASE
register_phys_mem(DEVICE1_TYPE, DEVICE1_PA_BASE, DEVICE1_SIZE);
#endif
#ifdef DEVICE2_PA_BASE
register_phys_mem(DEVICE2_TYPE, DEVICE2_PA_BASE, DEVICE2_SIZE);
#endif
#ifdef DEVICE3_PA_BASE
register_phys_mem(DEVICE3_TYPE, DEVICE3_PA_BASE, DEVICE3_SIZE);
#endif
#ifdef DEVICE4_PA_BASE
register_phys_mem(DEVICE4_TYPE, DEVICE4_PA_BASE, DEVICE4_SIZE);
#endif
#ifdef DEVICE5_PA_BASE
register_phys_mem(DEVICE5_TYPE, DEVICE5_PA_BASE, DEVICE5_SIZE);
#endif
#ifdef DEVICE6_PA_BASE
register_phys_mem(DEVICE6_TYPE, DEVICE6_PA_BASE, DEVICE6_SIZE);
#endif
static bool _pbuf_intersects(struct memaccess_area *a, size_t alen,
			     paddr_t pa, size_t size)
{
	size_t n;

	for (n = 0; n < alen; n++)
		if (core_is_buffer_intersect(pa, size, a[n].paddr, a[n].size))
			return true;
	return false;
}
#define pbuf_intersects(a, pa, size) \
	_pbuf_intersects((a), ARRAY_SIZE(a), (pa), (size))
static bool _pbuf_is_inside(struct memaccess_area *a, size_t alen,
			    paddr_t pa, size_t size)
{
	size_t n;

	for (n = 0; n < alen; n++)
		if (core_is_buffer_inside(pa, size, a[n].paddr, a[n].size))
			return true;
	return false;
}
#define pbuf_is_inside(a, pa, size) \
	_pbuf_is_inside((a), ARRAY_SIZE(a), (pa), (size))
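/*
 * Note the difference between the two helpers above: _pbuf_intersects()
 * is true if the buffer overlaps any of the areas at all, while
 * _pbuf_is_inside() requires the whole buffer to fit within one area.
 * For example, a buffer straddling the end of DRAM0 intersects ddr[] but
 * is not inside it.
 */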
static bool pa_is_in_map(struct tee_mmap_region *map, paddr_t pa)
{
	if (!map)
		return false;
	return (pa >= map->pa && pa <= (map->pa + map->size - 1));
}

static bool va_is_in_map(struct tee_mmap_region *map, vaddr_t va)
{
	if (!map)
		return false;
	return (va >= map->va && va <= (map->va + map->size - 1));
}
/* check if target buffer fits in a core default map area */
static bool pbuf_inside_map_area(unsigned long p, size_t l,
				 struct tee_mmap_region *map)
{
	return core_is_buffer_inside(p, l, map->pa, map->size);
}
static struct tee_mmap_region *find_map_by_type(enum teecore_memtypes type)
{
	struct tee_mmap_region *map;

	for (map = static_memory_map; map->type != MEM_AREA_NOTYPE; map++)
		if (map->type == type)
			return map;
	return NULL;
}
static struct tee_mmap_region *find_map_by_type_and_pa(
		enum teecore_memtypes type, paddr_t pa)
{
	struct tee_mmap_region *map;

	for (map = static_memory_map; map->type != MEM_AREA_NOTYPE; map++) {
		if (map->type != type)
			continue;
		if (pa_is_in_map(map, pa))
			return map;
	}
	return NULL;
}
static struct tee_mmap_region *find_map_by_va(void *va)
{
	struct tee_mmap_region *map = static_memory_map;
	unsigned long a = (unsigned long)va;

	while (map->type != MEM_AREA_NOTYPE) {
		if ((a >= map->va) && (a <= (map->va - 1 + map->size)))
			return map;
		map++;
	}
	return NULL;
}
static struct tee_mmap_region *find_map_by_pa(unsigned long pa)
{
	struct tee_mmap_region *map = static_memory_map;

	while (map->type != MEM_AREA_NOTYPE) {
		if ((pa >= map->pa) && (pa < (map->pa + map->size)))
			return map;
		map++;
	}
	return NULL;
}
#ifdef CFG_SECURE_DATA_PATH
extern const struct core_mmu_phys_mem __start_phys_sdp_mem_section;
extern const struct core_mmu_phys_mem __end_phys_sdp_mem_section;

static bool pbuf_is_sdp_mem(paddr_t pbuf, size_t len)
{
	const struct core_mmu_phys_mem *mem;

	for (mem = &__start_phys_sdp_mem_section;
	     mem < &__end_phys_sdp_mem_section; mem++)
		if (core_is_buffer_inside(pbuf, len, mem->addr, mem->size))
			return true;
	return false;
}

#define MSG_SDP_INTERSECT(pa1, sz1, pa2, sz2) \
	EMSG("[%" PRIxPA " %" PRIxPA "] intersects [%" PRIxPA " %" PRIxPA "]", \
	     pa1, pa1 + sz1, pa2, pa2 + sz2)
/* Check SDP memories comply with registered memories */
static void verify_sdp_mem_areas(struct tee_mmap_region *mem_map, size_t len)
{
	const struct core_mmu_phys_mem *mem;
	const struct core_mmu_phys_mem *mem2;
	const struct core_mmu_phys_mem *start = &__start_phys_sdp_mem_section;
	const struct core_mmu_phys_mem *end = &__end_phys_sdp_mem_section;
	struct tee_mmap_region *mmap;
	size_t n;

	if (start == end) {
		IMSG("Secure data path enabled without any SDP memory area");
		return;
	}

	for (mem = start; mem < end; mem++)
		DMSG("SDP memory [%" PRIxPA " %" PRIxPA "]",
		     mem->addr, mem->addr + mem->size);

	/* Check SDP memories do not intersect each other */
	for (mem = start; mem < end - 1; mem++) {
		for (mem2 = mem + 1; mem2 < end; mem2++) {
			if (core_is_buffer_intersect(mem2->addr, mem2->size,
						     mem->addr, mem->size)) {
				MSG_SDP_INTERSECT(mem2->addr, mem2->size,
						  mem->addr, mem->size);
				panic("SDP memory intersection");
			}
		}
	}

	/*
	 * Check SDP memories do not intersect any mapped memory.
	 * This is called before reserved VA space is loaded in mem_map.
	 */
	for (mem = start; mem < end; mem++) {
		for (mmap = mem_map, n = 0; n < len; mmap++, n++) {
			if (core_is_buffer_intersect(mem->addr, mem->size,
						     mmap->pa, mmap->size)) {
				MSG_SDP_INTERSECT(mem->addr, mem->size,
						  mmap->pa, mmap->size);
				panic("SDP memory intersection");
			}
		}
	}
}
struct mobj **core_sdp_mem_create_mobjs(void)
{
	const struct core_mmu_phys_mem *mem;
	struct mobj **mobj_base;
	struct mobj **mobj;
	int cnt = &__end_phys_sdp_mem_section - &__start_phys_sdp_mem_section;

	/* SDP mobjs table must end with a NULL entry */
	mobj_base = calloc(cnt + 1, sizeof(struct mobj *));
	if (!mobj_base)
		panic("Out of memory");

	for (mem = &__start_phys_sdp_mem_section, mobj = mobj_base;
	     mem < &__end_phys_sdp_mem_section; mem++, mobj++) {
		*mobj = mobj_phys_alloc(mem->addr, mem->size,
					TEE_MATTR_CACHE_CACHED,
					CORE_MEM_SDP_MEM);
		if (!*mobj)
			panic("can't create SDP physical memory object");
	}
	return mobj_base;
}
#else /* CFG_SECURE_DATA_PATH */
static bool pbuf_is_sdp_mem(paddr_t pbuf __unused, size_t len __unused)
{
	return false;
}

static void verify_sdp_mem_areas(struct tee_mmap_region *mem_map __unused,
				 size_t len __unused)
{
}
#endif /* CFG_SECURE_DATA_PATH */
extern const struct core_mmu_phys_mem __start_phys_mem_map_section;
extern const struct core_mmu_phys_mem __end_phys_mem_map_section;
static void add_phys_mem(struct tee_mmap_region *memory_map, size_t num_elems,
			 const struct core_mmu_phys_mem *mem, size_t *last)
{
	size_t n = 0;
	paddr_t pa;
	size_t size;

	/*
	 * When all entries are added we'd like to have it in a sorted
	 * array first based on memory type and secondly on physical
	 * address. If some ranges of memory of the same type overlap or
	 * are next to each other they are coalesced into one entry. This
	 * makes it easier later when building the translation tables.
	 *
	 * Note that it's valid to have the same physical memory as several
	 * different memory types, for instance the same device memory
	 * mapped as both secure and non-secure. This will probably not
	 * happen often in practice.
	 */
	DMSG("%s %d 0x%08" PRIxPA " size 0x%08zx",
	     mem->name, mem->type, mem->addr, mem->size);
	while (true) {
		if (n >= (num_elems - 1)) {
			EMSG("Out of entries (%zu) in memory_map", num_elems);
			panic();
		}
		if (n == *last)
			break;
		pa = memory_map[n].pa;
		size = memory_map[n].size;
		if (mem->addr >= pa && mem->addr <= (pa + (size - 1)) &&
		    mem->type == memory_map[n].type) {
			DMSG("Physical mem map overlaps 0x%" PRIxPA,
			     mem->addr);
			memory_map[n].pa = MIN(pa, mem->addr);
			memory_map[n].size = MAX(size, mem->size) +
					     (pa - memory_map[n].pa);
			return;
		}
		if (mem->type < memory_map[n].type ||
		    (mem->type == memory_map[n].type && mem->addr < pa))
			break; /* found the spot where to insert this memory */
		n++;
	}

	memmove(memory_map + n + 1, memory_map + n,
		sizeof(struct tee_mmap_region) * (*last - n));
	(*last)++;
	memset(memory_map + n, 0, sizeof(memory_map[0]));
	memory_map[n].type = mem->type;
	memory_map[n].pa = mem->addr;
	memory_map[n].size = mem->size;
}
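/*
 * Illustration of the coalescing above (addresses made up): registering
 * MEM_AREA_IO_SEC [0x10000000, 0x10100000) and then MEM_AREA_IO_SEC
 * [0x10080000, 0x10180000) leaves a single merged entry
 * [0x10000000, 0x10180000) thanks to the MIN()/MAX() arithmetic.
 */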
static void add_va_space(struct tee_mmap_region *memory_map, size_t num_elems,
			 unsigned int type, size_t size, size_t *last)
{
	size_t n = 0;

	DMSG("type %d size 0x%08zx", type, size);
	while (true) {
		if (n >= (num_elems - 1)) {
			EMSG("Out of entries (%zu) in memory_map", num_elems);
			panic();
		}
		if (n == *last)
			break;
		if (type < memory_map[n].type)
			break;
		n++;
	}

	memmove(memory_map + n + 1, memory_map + n,
		sizeof(struct tee_mmap_region) * (*last - n));
	(*last)++;
	memset(memory_map + n, 0, sizeof(memory_map[0]));
	memory_map[n].type = type;
	memory_map[n].size = size;
}
uint32_t core_mmu_type_to_attr(enum teecore_memtypes t)
{
	const uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW |
			      TEE_MATTR_GLOBAL;
	const uint32_t cached = TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT;
	const uint32_t noncache = TEE_MATTR_CACHE_NONCACHE <<
				  TEE_MATTR_CACHE_SHIFT;

	switch (t) {
	case MEM_AREA_TEE_RAM:
		return attr | TEE_MATTR_SECURE | TEE_MATTR_PX | cached;
	case MEM_AREA_TA_RAM:
		return attr | TEE_MATTR_SECURE | cached;
	case MEM_AREA_NSEC_SHM:
		return attr | cached;
	case MEM_AREA_IO_NSEC:
		return attr | noncache;
	case MEM_AREA_IO_SEC:
		return attr | TEE_MATTR_SECURE | noncache;
	case MEM_AREA_RAM_NSEC:
		return attr | cached;
	case MEM_AREA_RAM_SEC:
		return attr | TEE_MATTR_SECURE | cached;
	case MEM_AREA_RES_VASPACE:
		return 0;
	default:
		panic("invalid type");
	}
}
static void init_mem_map(struct tee_mmap_region *memory_map, size_t num_elems)
{
	const struct core_mmu_phys_mem *mem;
	struct tee_mmap_region *map;
	size_t last = 0;
	vaddr_t va;
	size_t n;

	for (mem = &__start_phys_mem_map_section;
	     mem < &__end_phys_mem_map_section; mem++) {
		struct core_mmu_phys_mem m = *mem;

		if (m.type == MEM_AREA_IO_NSEC || m.type == MEM_AREA_IO_SEC) {
			m.addr = ROUNDDOWN(m.addr, CORE_MMU_PGDIR_SIZE);
			m.size = ROUNDUP(m.size + (mem->addr - m.addr),
					 CORE_MMU_PGDIR_SIZE);
		}
		add_phys_mem(memory_map, num_elems, &m, &last);
	}

	verify_sdp_mem_areas(memory_map, num_elems);

	add_va_space(memory_map, num_elems, MEM_AREA_RES_VASPACE,
		     RES_VASPACE_SIZE, &last);

	memory_map[last].type = MEM_AREA_NOTYPE;
	/*
	 * Assign region sizes, note that MEM_AREA_TEE_RAM always uses
	 * SMALL_PAGE_SIZE if paging is enabled.
	 */
	for (map = memory_map; map->type != MEM_AREA_NOTYPE; map++) {
		paddr_t mask = map->pa | map->size;

		if (!(mask & CORE_MMU_PGDIR_MASK))
			map->region_size = CORE_MMU_PGDIR_SIZE;
		else if (!(mask & SMALL_PAGE_MASK))
			map->region_size = SMALL_PAGE_SIZE;
		else
			panic("Impossible memory alignment");
	}
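	/*
	 * The mask trick above checks the alignment of pa and size in one
	 * go: the low bits of (pa | size) are clear only if both values are
	 * aligned. E.g. pa = 0x40100000 and size = 0x100000 give
	 * (mask & CORE_MMU_PGDIR_MASK) == 0, so that region can be mapped
	 * with 1MByte "pgdir" entries.
	 */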
	/*
	 * bootcfg_memory_map is sorted in order first by type and last by
	 * address. This puts TEE_RAM first and TA_RAM second.
	 */
	map = memory_map;
	assert(map->type == MEM_AREA_TEE_RAM);
	map->va = map->pa;
#ifdef CFG_WITH_PAGER
	map->region_size = SMALL_PAGE_SIZE;
#endif
	map->attr = core_mmu_type_to_attr(map->type);

	if (core_mmu_place_tee_ram_at_top(map->pa)) {
		va = map->va;
		map++;
		while (map->type != MEM_AREA_NOTYPE) {
			map->attr = core_mmu_type_to_attr(map->type);
			va -= map->size;
			va = ROUNDDOWN(va, map->region_size);
			map->va = va;
			map++;
		}
		/*
		 * The memory map should be sorted by virtual address
		 * when this function returns. As we're assigning va in
		 * the opposite direction we need to reverse the list.
		 */
		for (n = 0; n < last / 2; n++) {
			struct tee_mmap_region r;

			r = memory_map[last - n - 1];
			memory_map[last - n - 1] = memory_map[n];
			memory_map[n] = r;
		}
	} else {
		va = ROUNDUP(map->va + map->size, CORE_MMU_PGDIR_SIZE);
		map++;
		while (map->type != MEM_AREA_NOTYPE) {
			map->attr = core_mmu_type_to_attr(map->type);
			va = ROUNDUP(va, map->region_size);
			map->va = va;
			va += map->size;
			map++;
		}
	}

	for (map = memory_map; map->type != MEM_AREA_NOTYPE; map++) {
		vaddr_t __maybe_unused vstart;

		vstart = map->va + ((vaddr_t)map->pa & (map->region_size - 1));
		DMSG("type va %d 0x%08" PRIxVA "..0x%08" PRIxVA
		     " pa 0x%08" PRIxPA "..0x%08" PRIxPA " size %#zx",
		     map->type, vstart, vstart + map->size - 1,
		     (paddr_t)map->pa, (paddr_t)map->pa + map->size - 1,
		     map->size);
	}
}
/*
 * core_init_mmu_map - init tee core default memory mapping
 *
 * This routine sets the static default tee core mapping.
 *
 * If an error happens, core_init_mmu_map is expected to reset.
 */
void core_init_mmu_map(void)
{
	struct tee_mmap_region *map;
	size_t n;

	for (n = 0; n < ARRAY_SIZE(secure_only); n++) {
		if (pbuf_intersects(nsec_shared, secure_only[n].paddr,
				    secure_only[n].size))
			panic("Invalid memory access config: sec/nsec");
	}

	if (!mem_map_inited)
		init_mem_map(static_memory_map, ARRAY_SIZE(static_memory_map));

	map = static_memory_map;
	while (map->type != MEM_AREA_NOTYPE) {
		switch (map->type) {
		case MEM_AREA_TEE_RAM:
			if (!pbuf_is_inside(secure_only, map->pa, map->size))
				panic("TEE_RAM can't fit in secure_only");
			map_tee_ram = map;
			break;
		case MEM_AREA_TA_RAM:
			if (!pbuf_is_inside(secure_only, map->pa, map->size))
				panic("TA_RAM can't fit in secure_only");
			map_ta_ram = map;
			break;
		case MEM_AREA_NSEC_SHM:
			if (!pbuf_is_inside(nsec_shared, map->pa, map->size))
				panic("NS_SHM can't fit in nsec_shared");
			map_nsec_shm = map;
			break;
		case MEM_AREA_IO_SEC:
		case MEM_AREA_IO_NSEC:
		case MEM_AREA_RAM_SEC:
		case MEM_AREA_RAM_NSEC:
		case MEM_AREA_RES_VASPACE:
			break;
		default:
			EMSG("Unhandled memtype %d", map->type);
			panic();
		}
		map++;
	}

	/* Check that we have the mandatory memory areas defined */
	if (!map_tee_ram || !map_ta_ram || !map_nsec_shm)
		panic("mandatory area(s) not found");

	core_init_mmu_tables(static_memory_map);
}
/* routines to retrieve shared mem configuration */
bool core_mmu_is_shm_cached(void)
{
	if (!map_nsec_shm)
		return false;
	return map_nsec_shm->attr >> TEE_MATTR_CACHE_SHIFT ==
	       TEE_MATTR_CACHE_CACHED;
}
bool core_mmu_mattr_is_ok(uint32_t mattr)
{
	/*
	 * Keep in sync with core_mmu_lpae.c:mattr_to_desc and
	 * core_mmu_v7.c:mattr_to_texcb
	 */
	switch ((mattr >> TEE_MATTR_CACHE_SHIFT) & TEE_MATTR_CACHE_MASK) {
	case TEE_MATTR_CACHE_NONCACHE:
	case TEE_MATTR_CACHE_CACHED:
		return true;
	default:
		return false;
	}
}
/*
 * test attributes of target physical buffer
 *
 * Flags: pbuf_is(SECURE, NOT_SECURE, RAM, IOMEM, KEYVAULT).
 */
bool core_pbuf_is(uint32_t attr, paddr_t pbuf, size_t len)
{
	struct tee_mmap_region *map;

	/* Empty buffers comply with anything */
	if (len == 0)
		return true;

	switch (attr) {
	case CORE_MEM_SEC:
		return pbuf_is_inside(secure_only, pbuf, len);
	case CORE_MEM_NON_SEC:
		return pbuf_is_inside(nsec_shared, pbuf, len);
	case CORE_MEM_TEE_RAM:
		return pbuf_inside_map_area(pbuf, len, map_tee_ram);
	case CORE_MEM_TA_RAM:
		return pbuf_inside_map_area(pbuf, len, map_ta_ram);
	case CORE_MEM_NSEC_SHM:
		return pbuf_inside_map_area(pbuf, len, map_nsec_shm);
	case CORE_MEM_SDP_MEM:
		return pbuf_is_sdp_mem(pbuf, len);
	case CORE_MEM_EXTRAM:
		return pbuf_is_inside(ddr, pbuf, len);
	case CORE_MEM_CACHED:
		map = find_map_by_pa(pbuf);
		if (map == NULL || !pbuf_inside_map_area(pbuf, len, map))
			return false;
		return map->attr >> TEE_MATTR_CACHE_SHIFT ==
		       TEE_MATTR_CACHE_CACHED;
	default:
		return false;
	}
}
/* test attributes of target virtual buffer (in core mapping) */
bool core_vbuf_is(uint32_t attr, const void *vbuf, size_t len)
{
	paddr_t p;

	/* Empty buffers comply with anything */
	if (len == 0)
		return true;

	p = virt_to_phys((void *)vbuf);
	if (!p)
		return false;

	return core_pbuf_is(attr, p, len);
}
/* core_va2pa - teecore exported service */
int core_va2pa_helper(void *va, paddr_t *pa)
{
	struct tee_mmap_region *map;

	map = find_map_by_va(va);
	if (!va_is_in_map(map, (vaddr_t)va))
		return -1;

	*pa = ((uintptr_t)va & (map->region_size - 1)) |
	    ((map->pa + (uintptr_t)va - map->va) & ~(map->region_size - 1));
	return 0;
}
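/*
 * Worked example of the arithmetic above (illustrative numbers): with a
 * region mapped at va 0x2000000, pa 0x80100000 and region_size 0x100000,
 * va 0x2004321 keeps its in-region offset bits (0x04321) and takes the
 * rest from pa + (va - map->va), yielding pa 0x80104321.
 */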
static void *map_pa2va(struct tee_mmap_region *map, paddr_t pa)
{
	if (!pa_is_in_map(map, pa))
		return NULL;
	return (void *)((pa & (map->region_size - 1)) |
		((map->va + pa - map->pa) & ~((vaddr_t)map->region_size - 1)));
}
/*
 * teecore gets some memory area definitions
 */
void core_mmu_get_mem_by_type(unsigned int type, vaddr_t *s, vaddr_t *e)
{
	struct tee_mmap_region *map = find_map_by_type(type);

	if (map) {
		*s = map->va;
		*e = map->va + map->size;
	} else {
		*s = 0;
		*e = 0;
	}
}
enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa)
{
	struct tee_mmap_region *map = find_map_by_pa(pa);

	if (!map)
		return MEM_AREA_NOTYPE;
	return map->type;
}
int core_tlb_maintenance(int op, unsigned int a)
{
	/*
	 * We're doing TLB invalidation because we've changed mapping.
	 * The dsb() makes sure that written data is visible.
	 */
	dsb();

	switch (op) {
	case TLBINV_UNIFIEDTLB:
		secure_mmu_unifiedtlbinvall();
		break;
	case TLBINV_CURRENT_ASID:
		secure_mmu_unifiedtlbinv_curasid();
		break;
	case TLBINV_BY_ASID:
		secure_mmu_unifiedtlbinv_byasid(a);
		break;
	case TLBINV_BY_MVA:
		EMSG("TLB_INV_SECURE_MVA is not yet supported!");
		while (1)
			;
		secure_mmu_unifiedtlbinvbymva(a);
		break;
	default:
		return 1;
	}
	return 0;
}
TEE_Result cache_op_inner(enum cache_op op, void *va, size_t len)
{
	switch (op) {
	case DCACHE_CLEAN:
		arm_cl1_d_cleanbysetway();
		break;
	case DCACHE_AREA_CLEAN:
		if (len)
			arm_cl1_d_cleanbyva(va, (char *)va + len - 1);
		break;
	case DCACHE_INVALIDATE:
		arm_cl1_d_invbysetway();
		break;
	case DCACHE_AREA_INVALIDATE:
		if (len)
			arm_cl1_d_invbyva(va, (char *)va + len - 1);
		break;
	case ICACHE_INVALIDATE:
		arm_cl1_i_inv_all();
		break;
	case ICACHE_AREA_INVALIDATE:
		if (len)
			arm_cl1_i_inv(va, (char *)va + len - 1);
		break;
	case DCACHE_CLEAN_INV:
		arm_cl1_d_cleaninvbysetway();
		break;
	case DCACHE_AREA_CLEAN_INV:
		if (len)
			arm_cl1_d_cleaninvbyva(va, (char *)va + len - 1);
		break;
	default:
		return TEE_ERROR_NOT_IMPLEMENTED;
	}
	return TEE_SUCCESS;
}
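/*
 * The *_AREA_* operations above work by virtual address and touch only the
 * given range, while the set/way variants sweep the whole L1 cache; the
 * area forms are therefore the ones to use when a small buffer is the
 * target.
 */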
TEE_Result cache_op_outer(enum cache_op op, paddr_t pa, size_t len)
{
	TEE_Result ret = TEE_SUCCESS;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	tee_l2cc_mutex_lock();
	switch (op) {
	case DCACHE_INVALIDATE:
		arm_cl2_invbyway(pl310_base());
		break;
	case DCACHE_AREA_INVALIDATE:
		if (len)
			arm_cl2_invbypa(pl310_base(), pa, pa + len - 1);
		break;
	case DCACHE_CLEAN:
		arm_cl2_cleanbyway(pl310_base());
		break;
	case DCACHE_AREA_CLEAN:
		if (len)
			arm_cl2_cleanbypa(pl310_base(), pa, pa + len - 1);
		break;
	case DCACHE_CLEAN_INV:
		arm_cl2_cleaninvbyway(pl310_base());
		break;
	case DCACHE_AREA_CLEAN_INV:
		if (len)
			arm_cl2_cleaninvbypa(pl310_base(), pa, pa + len - 1);
		break;
	default:
		ret = TEE_ERROR_NOT_IMPLEMENTED;
	}

	tee_l2cc_mutex_unlock();
	thread_set_exceptions(exceptions);
	return ret;
}
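/*
 * Unlike the inner (L1) caches, the outer PL310 is a shared memory-mapped
 * controller, which is why the operations above are serialized with
 * tee_l2cc_mutex_lock() and run with foreign interrupts masked so the
 * register sequence is not interleaved with another core's.
 */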
void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
			paddr_t pa, uint32_t attr)
{
	assert(idx < tbl_info->num_entries);
	core_mmu_set_entry_primitive(tbl_info->table, tbl_info->level,
				     idx, pa, attr);
}

void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
			paddr_t *pa, uint32_t *attr)
{
	assert(idx < tbl_info->num_entries);
	core_mmu_get_entry_primitive(tbl_info->table, tbl_info->level,
				     idx, pa, attr);
}
static void set_region(struct core_mmu_table_info *tbl_info,
		       struct tee_mmap_region *region)
{
	unsigned end;
	unsigned idx;
	paddr_t pa;

	/* va, len and pa should be block aligned */
	assert(!core_mmu_get_block_offset(tbl_info, region->va));
	assert(!core_mmu_get_block_offset(tbl_info, region->size));
	assert(!core_mmu_get_block_offset(tbl_info, region->pa));

	idx = core_mmu_va2idx(tbl_info, region->va);
	end = core_mmu_va2idx(tbl_info, region->va + region->size);
	pa = region->pa;

	while (idx < end) {
		core_mmu_set_entry(tbl_info, idx, pa, region->attr);
		idx++;
		pa += 1 << tbl_info->shift;
	}
}
#ifdef CFG_SMALL_PAGE_USER_TA
static void set_pg_region(struct core_mmu_table_info *dir_info,
			  struct tee_ta_region *region, struct pgt **pgt,
			  struct core_mmu_table_info *pg_info)
{
	struct tee_mmap_region r = {
		.va = region->va,
		.size = region->size,
		.attr = region->attr,
	};
	vaddr_t end = r.va + r.size;
	uint32_t pgt_attr = (r.attr & TEE_MATTR_SECURE) | TEE_MATTR_TABLE;

	while (r.va < end) {
		if (!pg_info->table ||
		    r.va >= (pg_info->va_base + CORE_MMU_PGDIR_SIZE)) {
			/*
			 * We're assigning a new translation table.
			 */
			unsigned int idx;

			assert(*pgt); /* We should have alloced enough */

			/* Virtual addresses must grow */
			assert(r.va > pg_info->va_base);

			idx = core_mmu_va2idx(dir_info, r.va);
			pg_info->table = (*pgt)->tbl;
			pg_info->va_base = core_mmu_idx2va(dir_info, idx);
#ifdef CFG_PAGED_USER_TA
			assert((*pgt)->vabase == pg_info->va_base);
#endif
			*pgt = SLIST_NEXT(*pgt, link);

			core_mmu_set_entry(dir_info, idx,
					   virt_to_phys(pg_info->table),
					   pgt_attr);
		}

		r.size = MIN(CORE_MMU_PGDIR_SIZE - (r.va - pg_info->va_base),
			     end - r.va);
		if (!mobj_is_paged(region->mobj)) {
			size_t granule = BIT(pg_info->shift);
			size_t offset = r.va - region->va + region->offset;

			if (mobj_get_pa(region->mobj, offset, granule,
					&r.pa) != TEE_SUCCESS)
				panic("Failed to get PA of unpaged mobj");
			set_region(pg_info, &r);
		}
		r.va += r.size;
	}
}
void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
				struct user_ta_ctx *utc)
{
	struct core_mmu_table_info pg_info;
	struct pgt_cache *pgt_cache = &thread_get_tsd()->pgt_cache;
	struct pgt *pgt;
	size_t n;

	/* Find the last valid entry */
	n = ARRAY_SIZE(utc->mmu->regions);
	while (true) {
		n--;
		if (utc->mmu->regions[n].size)
			break;
		if (!n)
			return;	/* Nothing to map */
	}

	/*
	 * Allocate all page tables in advance.
	 */
	pgt_alloc(pgt_cache, &utc->ctx, utc->mmu->regions[0].va,
		  utc->mmu->regions[n].va + utc->mmu->regions[n].size - 1);
	pgt = SLIST_FIRST(pgt_cache);

	core_mmu_set_info_table(&pg_info, dir_info->level + 1, 0, NULL);

	for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++)
		mobj_update_mapping(utc->mmu->regions[n].mobj, utc,
				    utc->mmu->regions[n].va);

	for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
		if (!utc->mmu->regions[n].size)
			continue;
		set_pg_region(dir_info, utc->mmu->regions + n, &pgt, &pg_info);
	}
}
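/*
 * Note: set_pg_region() asserts that virtual addresses only grow, so the
 * regions[] array is expected to be sorted by va, with unused (zero-sized)
 * entries simply skipped.
 */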
#else
void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
				struct user_ta_ctx *utc)
{
	unsigned int n;
	struct tee_mmap_region r;
	size_t offset;
	size_t granule = BIT(dir_info->shift);

	memset(&r, 0, sizeof(r));
	for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
		if (!utc->mmu->regions[n].size)
			continue;

		offset = utc->mmu->regions[n].offset;
		r.va = utc->mmu->regions[n].va;
		r.size = utc->mmu->regions[n].size;
		r.attr = utc->mmu->regions[n].attr;

		if (mobj_get_pa(utc->mmu->regions[n].mobj, offset, granule,
				&r.pa) != TEE_SUCCESS)
			panic("Failed to get PA of unpaged mobj");

		set_region(dir_info, &r);
	}
}
#endif
bool core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len)
{
	struct core_mmu_table_info tbl_info;
	struct tee_mmap_region *map;
	size_t n;
	size_t granule;
	paddr_t p;
	size_t l;

	if (!len)
		return true;

	/* Check if the memory is already mapped */
	map = find_map_by_type_and_pa(type, addr);
	if (map && pbuf_inside_map_area(addr, len, map))
		return true;

	/* Find the reserved va space used for late mappings */
	map = find_map_by_type(MEM_AREA_RES_VASPACE);
	if (!map)
		return false;

	if (!core_mmu_find_table(map->va, UINT_MAX, &tbl_info))
		return false;

	granule = 1 << tbl_info.shift;
	p = ROUNDDOWN(addr, granule);
	l = ROUNDUP(len + addr - p, granule);
	/*
	 * Something is wrong, we can't fit the va range into the selected
	 * table. The reserved va range is possibly misaligned with the
	 * granule.
	 */
	if (core_mmu_va2idx(&tbl_info, map->va + len) >= tbl_info.num_entries)
		return false;

	/* Find end of the memory map */
	n = 0;
	while (static_memory_map[n].type != MEM_AREA_NOTYPE)
		n++;

	if (n < (ARRAY_SIZE(static_memory_map) - 1)) {
		/* There's room for another entry */
		static_memory_map[n].va = map->va;
		static_memory_map[n].size = l;
		static_memory_map[n + 1].type = MEM_AREA_NOTYPE;
		map->va += l;
		map->size -= l;
		map = static_memory_map + n;
	} else {
		/*
		 * There isn't room for another entry, steal the reserved
		 * entry as it's not useful for anything else any longer.
		 */
		map->size = l;
	}
	map->type = type;
	map->region_size = granule;
	map->attr = core_mmu_type_to_attr(type);
	map->pa = p;

	set_region(&tbl_info, map);
	return true;
}
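/*
 * A typical late-mapping sequence (illustrative only, driver code may
 * differ):
 *
 *	if (!core_mmu_add_mapping(MEM_AREA_IO_SEC, pa, size))
 *		panic();
 *	vaddr_t va = (vaddr_t)phys_to_virt(pa, MEM_AREA_IO_SEC);
 *
 * The helper below resolves va->pa with the hardware address translation
 * (AT) instructions; all exceptions are masked so nothing can overwrite
 * the PAR register between the AT operation and the read-back.
 */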
static bool arm_va2pa_helper(void *va, paddr_t *pa)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	paddr_t par;
	paddr_t par_pa_mask;
	bool ret = false;

#ifdef ARM32
	write_ats1cpr((vaddr_t)va);
	isb();
#ifdef CFG_WITH_LPAE
	par = read_par64();
	par_pa_mask = PAR64_PA_MASK;
#else
	par = read_par32();
	par_pa_mask = PAR32_PA_MASK;
#endif
#endif /*ARM32*/

#ifdef ARM64
	write_at_s1e1r((vaddr_t)va);
	isb();
	par = read_par_el1();
	par_pa_mask = PAR_PA_MASK;
#endif /*ARM64*/
	if (par & PAR_F)
		goto out;
	*pa = (par & (par_pa_mask << PAR_PA_SHIFT)) |
	      ((vaddr_t)va & ((1 << PAR_PA_SHIFT) - 1));

	ret = true;
out:
	thread_unmask_exceptions(exceptions);
	return ret;
}
#ifdef CFG_WITH_PAGER
static vaddr_t get_linear_map_end(void)
{
	/* this is synced with the generic linker file kern.ld.S */
	return (vaddr_t)__heap2_end;
}
#endif
#if defined(CFG_TEE_CORE_DEBUG)
static void check_pa_matches_va(void *va, paddr_t pa)
{
	TEE_Result res;
	vaddr_t v = (vaddr_t)va;
	paddr_t p = 0;

	if (core_mmu_user_va_range_is_defined()) {
		vaddr_t user_va_base;
		size_t user_va_size;

		core_mmu_get_user_va_range(&user_va_base, &user_va_size);
		if (v >= user_va_base &&
		    v <= (user_va_base - 1 + user_va_size)) {
			if (!core_mmu_user_mapping_is_active()) {
				if (pa)
					panic("issue in linear address space");
				return;
			}

			res = tee_mmu_user_va2pa_helper(
				to_user_ta_ctx(tee_mmu_get_ctx()), va, &p);
			if (res == TEE_SUCCESS && pa != p)
				panic("bad pa");
			if (res != TEE_SUCCESS && pa)
				panic("false pa");
			return;
		}
	}
#ifdef CFG_WITH_PAGER
	if (v >= CFG_TEE_LOAD_ADDR && v < get_linear_map_end()) {
		if (v != pa)
			panic("issue in linear address space");
		return;
	}
	if (v >= (CFG_TEE_LOAD_ADDR & ~CORE_MMU_PGDIR_MASK) &&
	    v <= (CFG_TEE_LOAD_ADDR | CORE_MMU_PGDIR_MASK)) {
		struct core_mmu_table_info *ti = &tee_pager_tbl_info;
		uint32_t a;

		/*
		 * Lookups in the page table managed by the pager are
		 * dangerous for addresses in the paged area as those pages
		 * change all the time. But some ranges are safe,
		 * rw-locked areas when the page is populated for instance.
		 */
		core_mmu_get_entry(ti, core_mmu_va2idx(ti, v), &p, &a);
		if (a & TEE_MATTR_VALID_BLOCK) {
			paddr_t mask = ((1 << ti->shift) - 1);

			p |= v & mask;
			if (pa != p)
				panic();
		} else if (pa) {
			panic();
		}
		return;
	}
#endif
	if (!core_va2pa_helper(va, &p)) {
		if (pa != p)
			panic();
	} else {
		if (pa)
			panic();
	}
}
#else
static void check_pa_matches_va(void *va __unused, paddr_t pa __unused)
{
}
#endif
paddr_t virt_to_phys(void *va)
{
	paddr_t pa;

	if (!arm_va2pa_helper(va, &pa))
		pa = 0;
	check_pa_matches_va(va, pa);
	return pa;
}
#if defined(CFG_TEE_CORE_DEBUG)
static void check_va_matches_pa(paddr_t pa, void *va)
{
	if (va && virt_to_phys(va) != pa)
		panic();
}
#else
static void check_va_matches_pa(paddr_t pa __unused, void *va __unused)
{
}
#endif
static void *phys_to_virt_ta_vaspace(paddr_t pa)
{
	TEE_Result res;
	void *va = NULL;

	if (!core_mmu_user_mapping_is_active())
		return NULL;

	res = tee_mmu_user_pa2va_helper(to_user_ta_ctx(tee_mmu_get_ctx()),
					pa, &va);
	if (res != TEE_SUCCESS)
		return NULL;
	return va;
}
#ifdef CFG_WITH_PAGER
static void *phys_to_virt_tee_ram(paddr_t pa)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	unsigned idx;
	unsigned end_idx;
	uint32_t a;
	paddr_t p;

	if (pa >= CFG_TEE_LOAD_ADDR && pa < get_linear_map_end())
		return (void *)(vaddr_t)pa;

	end_idx = core_mmu_va2idx(ti, CFG_TEE_RAM_START +
				      CFG_TEE_RAM_VA_SIZE);
	/* Most addresses are mapped linearly, try that first if possible. */
	idx = core_mmu_va2idx(ti, pa);
	if (idx >= core_mmu_va2idx(ti, CFG_TEE_RAM_START) &&
	    idx < end_idx) {
		core_mmu_get_entry(ti, idx, &p, &a);
		if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
			return (void *)core_mmu_idx2va(ti, idx);
	}

	for (idx = core_mmu_va2idx(ti, CFG_TEE_RAM_START);
	     idx < end_idx; idx++) {
		core_mmu_get_entry(ti, idx, &p, &a);
		if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
			return (void *)core_mmu_idx2va(ti, idx);
	}

	return NULL;
}
#else
static void *phys_to_virt_tee_ram(paddr_t pa)
{
	return map_pa2va(find_map_by_type_and_pa(MEM_AREA_TEE_RAM, pa), pa);
}
#endif
void *phys_to_virt(paddr_t pa, enum teecore_memtypes m)
{
	void *va;

	switch (m) {
	case MEM_AREA_TA_VASPACE:
		va = phys_to_virt_ta_vaspace(pa);
		break;
	case MEM_AREA_TEE_RAM:
		va = phys_to_virt_tee_ram(pa);
		break;
	default:
		va = map_pa2va(find_map_by_type_and_pa(m, pa), pa);
	}
	check_va_matches_pa(pa, va);
	return va;
}
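/*
 * In a debug build (CFG_TEE_CORE_DEBUG) phys_to_virt() and virt_to_phys()
 * cross-check each other, so a successful round trip, e.g.
 * virt_to_phys(phys_to_virt(pa, MEM_AREA_TEE_RAM)) == pa, is asserted
 * rather than silently assumed.
 */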
void *phys_to_virt_io(paddr_t pa)
{
	struct tee_mmap_region *map;
	void *va;

	map = find_map_by_type_and_pa(MEM_AREA_IO_SEC, pa);
	if (!map)
		map = find_map_by_type_and_pa(MEM_AREA_IO_NSEC, pa);
	if (!map)
		return NULL;
	va = map_pa2va(map, pa);
	check_va_matches_pa(pa, va);
	return va;
}
bool cpu_mmu_enabled(void)
{
	uint32_t sctlr;

#ifdef ARM32
	sctlr = read_sctlr();
#else
	sctlr = read_sctlr_el1();
#endif

	return sctlr & SCTLR_M ? true : false;
}