/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * This core mmu supports static section mapping (1MByte) and finer mapping
 * with 4kByte pages.
 * It should also allow core to map/unmap (and va/pa) at run-time.
 */
#include <arm.h>
#include <assert.h>
#include <kernel/generic_boot.h>
#include <kernel/panic.h>
#include <kernel/tee_l2cc_mutex.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/tz_ssvce.h>
#include <kernel/tz_ssvce_pl310.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/pgt_cache.h>
#include <mm/tee_mmu.h>
#include <mm/tee_pager.h>
#include <platform_config.h>
#include <string.h>
#include <trace.h>
#include <util.h>

#include "core_mmu_private.h"

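/*
 * MAX_MMAP_REGIONS sizes the static memory map below. RES_VASPACE_SIZE is
 * the amount of virtual address space reserved for mappings added at
 * run-time with core_mmu_add_mapping().
 */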
#define MAX_MMAP_REGIONS	10
#define RES_VASPACE_SIZE	(CORE_MMU_PGDIR_SIZE * 10)

/*
 * These variables are initialized before .bss is cleared. To avoid
 * resetting them when .bss is cleared we're storing them in .data instead,
 * even if they initially are zero.
 */

/* Default NSec shared memory allocated from NSec world */
unsigned long default_nsec_shm_size __early_bss;
unsigned long default_nsec_shm_paddr __early_bss;

static struct tee_mmap_region
	static_memory_map[MAX_MMAP_REGIONS + 1] __early_bss;
static bool mem_map_inited __early_bss;

static struct tee_mmap_region *map_tee_ram __early_bss;
static struct tee_mmap_region *map_ta_ram __early_bss;
static struct tee_mmap_region *map_nsec_shm __early_bss;

/* Define the platform's memory layout. */
struct memaccess_area {
	paddr_t paddr;
	size_t size;
};

#define MEMACCESS_AREA(a, s) { .paddr = a, .size = s }

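/*
 * Physical memory pools defined by the platform configuration: external
 * DDR, memory that must only be mapped secure, and the non-secure shared
 * memory. They are used below to sanity check the memory map and to
 * classify physical buffers in core_pbuf_is().
 */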
static struct memaccess_area ddr[] = {
	MEMACCESS_AREA(DRAM0_BASE, DRAM0_SIZE),
#ifdef DRAM1_BASE
	MEMACCESS_AREA(DRAM1_BASE, DRAM1_SIZE),
#endif
};

static struct memaccess_area secure_only[] = {
#ifdef TZSRAM_BASE
	MEMACCESS_AREA(TZSRAM_BASE, TZSRAM_SIZE),
#endif
	MEMACCESS_AREA(TZDRAM_BASE, TZDRAM_SIZE),
};

static struct memaccess_area nsec_shared[] = {
	MEMACCESS_AREA(CFG_SHMEM_START, CFG_SHMEM_SIZE),
};

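/*
 * Default physical memory registrations. register_phys_mem() emits a
 * struct core_mmu_phys_mem entry into the phys_mem_map link section that
 * init_mem_map() walks below; platforms may register additional device
 * ranges the same way.
 */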
register_phys_mem(MEM_AREA_TEE_RAM, CFG_TEE_RAM_START, CFG_TEE_RAM_PH_SIZE);
register_phys_mem(MEM_AREA_TA_RAM, CFG_TA_RAM_START, CFG_TA_RAM_SIZE);
register_phys_mem(MEM_AREA_NSEC_SHM, CFG_SHMEM_START, CFG_SHMEM_SIZE);
#ifdef DEVICE0_PA_BASE
register_phys_mem(DEVICE0_TYPE, DEVICE0_PA_BASE, DEVICE0_SIZE);
#endif
#ifdef DEVICE1_PA_BASE
register_phys_mem(DEVICE1_TYPE, DEVICE1_PA_BASE, DEVICE1_SIZE);
#endif
#ifdef DEVICE2_PA_BASE
register_phys_mem(DEVICE2_TYPE, DEVICE2_PA_BASE, DEVICE2_SIZE);
#endif
#ifdef DEVICE3_PA_BASE
register_phys_mem(DEVICE3_TYPE, DEVICE3_PA_BASE, DEVICE3_SIZE);
#endif
#ifdef DEVICE4_PA_BASE
register_phys_mem(DEVICE4_TYPE, DEVICE4_PA_BASE, DEVICE4_SIZE);
#endif
#ifdef DEVICE5_PA_BASE
register_phys_mem(DEVICE5_TYPE, DEVICE5_PA_BASE, DEVICE5_SIZE);
#endif
#ifdef DEVICE6_PA_BASE
register_phys_mem(DEVICE6_TYPE, DEVICE6_PA_BASE, DEVICE6_SIZE);
#endif

static bool _pbuf_intersects(struct memaccess_area *a, size_t alen,
			     paddr_t pa, size_t size)
{
	size_t n;

	for (n = 0; n < alen; n++)
		if (core_is_buffer_intersect(pa, size, a[n].paddr, a[n].size))
			return true;
	return false;
}
#define pbuf_intersects(a, pa, size) \
	_pbuf_intersects((a), ARRAY_SIZE(a), (pa), (size))

static bool _pbuf_is_inside(struct memaccess_area *a, size_t alen,
			    paddr_t pa, size_t size)
{
	size_t n;

	for (n = 0; n < alen; n++)
		if (core_is_buffer_inside(pa, size, a[n].paddr, a[n].size))
			return true;
	return false;
}
#define pbuf_is_inside(a, pa, size) \
	_pbuf_is_inside((a), ARRAY_SIZE(a), (pa), (size))

static bool pa_is_in_map(struct tee_mmap_region *map, paddr_t pa)
{
	if (!map)
		return false;
	return (pa >= map->pa && pa <= (map->pa + map->size - 1));
}

static bool va_is_in_map(struct tee_mmap_region *map, vaddr_t va)
{
	if (!map)
		return false;
	return (va >= map->va && va <= (map->va + map->size - 1));
}

/* check if target buffer fits in a core default map area */
static bool pbuf_inside_map_area(unsigned long p, size_t l,
				 struct tee_mmap_region *map)
{
	return core_is_buffer_inside(p, l, map->pa, map->size);
}

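/* Lookup helpers for the static memory map, keyed by type, PA or VA. */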
static struct tee_mmap_region *find_map_by_type(enum teecore_memtypes type)
{
	struct tee_mmap_region *map;

	for (map = static_memory_map; map->type != MEM_AREA_NOTYPE; map++)
		if (map->type == type)
			return map;
	return NULL;
}

static struct tee_mmap_region *find_map_by_type_and_pa(
			enum teecore_memtypes type, paddr_t pa)
{
	struct tee_mmap_region *map;

	for (map = static_memory_map; map->type != MEM_AREA_NOTYPE; map++) {
		if (map->type != type)
			continue;
		if (pa_is_in_map(map, pa))
			return map;
	}
	return NULL;
}

static struct tee_mmap_region *find_map_by_va(void *va)
{
	struct tee_mmap_region *map = static_memory_map;
	unsigned long a = (unsigned long)va;

	while (map->type != MEM_AREA_NOTYPE) {
		if ((a >= map->va) && (a <= (map->va - 1 + map->size)))
			return map;
		map++;
	}
	return NULL;
}

static struct tee_mmap_region *find_map_by_pa(unsigned long pa)
{
	struct tee_mmap_region *map = static_memory_map;

	while (map->type != MEM_AREA_NOTYPE) {
		if ((pa >= map->pa) && (pa < (map->pa + map->size)))
			return map;
		map++;
	}
	return NULL;
}

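/*
 * The entries created by register_phys_mem() end up between these two
 * linker-provided symbols.
 */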
extern const struct core_mmu_phys_mem __start_phys_mem_map_section;
extern const struct core_mmu_phys_mem __end_phys_mem_map_section;

static void add_phys_mem(struct tee_mmap_region *memory_map, size_t num_elems,
			 const struct core_mmu_phys_mem *mem, size_t *last)
{
	size_t n = 0;
	paddr_t pa;
	size_t size;

	/*
	 * When all entries are added we'd like to have them in a sorted
	 * array: first by memory type and secondly by physical address.
	 * If some ranges of memory of the same type overlap or are next
	 * to each other they are coalesced into one entry. This makes it
	 * easier later when building the translation tables.
	 *
	 * Note that it's valid to have the same physical memory as several
	 * different memory types, for instance the same device memory
	 * mapped as both secure and non-secure. This will probably not
	 * happen often in practice.
	 */
	DMSG("%s %d 0x%08" PRIxPA " size 0x%08zx",
	     mem->name, mem->type, mem->addr, mem->size);
	while (true) {
		if (n >= (num_elems - 1)) {
			EMSG("Out of entries (%zu) in memory_map", num_elems);
			panic();
		}
		if (n == *last)
			break;
		pa = memory_map[n].pa;
		size = memory_map[n].size;
		if (mem->addr >= pa && mem->addr <= (pa + (size - 1)) &&
		    mem->type == memory_map[n].type) {
			DMSG("Physical mem map overlaps 0x%" PRIxPA, mem->addr);
			memory_map[n].pa = MIN(pa, mem->addr);
			memory_map[n].size = MAX(size, mem->size) +
					     (pa - memory_map[n].pa);
			return;
		}
		if (mem->type < memory_map[n].type ||
		    (mem->type == memory_map[n].type && mem->addr < pa))
			break; /* found the spot where to insert this memory */
		n++;
	}

	memmove(memory_map + n + 1, memory_map + n,
		sizeof(struct tee_mmap_region) * (*last - n));
	(*last)++;
	memset(memory_map + n, 0, sizeof(memory_map[0]));
	memory_map[n].type = mem->type;
	memory_map[n].pa = mem->addr;
	memory_map[n].size = mem->size;
}

static void add_va_space(struct tee_mmap_region *memory_map, size_t num_elems,
			 unsigned int type, size_t size, size_t *last)
{
	size_t n = 0;

	DMSG("type %d size 0x%08zx", type, size);
	while (true) {
		if (n >= (num_elems - 1)) {
			EMSG("Out of entries (%zu) in memory_map", num_elems);
			panic();
		}
		if (n == *last)
			break;
		if (type < memory_map[n].type)
			break;
		n++;
	}

	memmove(memory_map + n + 1, memory_map + n,
		sizeof(struct tee_mmap_region) * (*last - n));
	(*last)++;
	memset(memory_map + n, 0, sizeof(memory_map[0]));
	memory_map[n].type = type;
	memory_map[n].size = size;
}

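/* Return the default mapping attributes for a given memory area type. */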
uint32_t core_mmu_type_to_attr(enum teecore_memtypes t)
{
	const uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW |
			      TEE_MATTR_GLOBAL;
	const uint32_t cached = TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT;
	const uint32_t noncache = TEE_MATTR_CACHE_NONCACHE <<
				  TEE_MATTR_CACHE_SHIFT;

	switch (t) {
	case MEM_AREA_TEE_RAM:
		return attr | TEE_MATTR_SECURE | TEE_MATTR_PX | cached;
	case MEM_AREA_TA_RAM:
		return attr | TEE_MATTR_SECURE | cached;
	case MEM_AREA_NSEC_SHM:
		return attr | cached;
	case MEM_AREA_IO_NSEC:
		return attr | noncache;
	case MEM_AREA_IO_SEC:
		return attr | TEE_MATTR_SECURE | noncache;
	case MEM_AREA_RAM_NSEC:
		return attr | cached;
	case MEM_AREA_RAM_SEC:
		return attr | TEE_MATTR_SECURE | cached;
	case MEM_AREA_RES_VASPACE:
		return 0;
	default:
		panic("invalid type");
	}
}

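/*
 * Build the static memory map: collect all registered physical memory
 * (rounding I/O areas to CORE_MMU_PGDIR_SIZE granularity), append the
 * reserved VA space, pick a region size for each entry and finally assign
 * virtual addresses and mapping attributes.
 */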
static void init_mem_map(struct tee_mmap_region *memory_map, size_t num_elems)
{
	const struct core_mmu_phys_mem *mem;
	struct tee_mmap_region *map;
	size_t last = 0;
	vaddr_t va;
	size_t n;

	for (mem = &__start_phys_mem_map_section;
	     mem < &__end_phys_mem_map_section; mem++) {
		struct core_mmu_phys_mem m = *mem;

		if (m.type == MEM_AREA_IO_NSEC || m.type == MEM_AREA_IO_SEC) {
			m.addr = ROUNDDOWN(m.addr, CORE_MMU_PGDIR_SIZE);
			m.size = ROUNDUP(m.size + (mem->addr - m.addr),
					 CORE_MMU_PGDIR_SIZE);
		}
		add_phys_mem(memory_map, num_elems, &m, &last);
	}

	add_va_space(memory_map, num_elems, MEM_AREA_RES_VASPACE,
		     RES_VASPACE_SIZE, &last);

	memory_map[last].type = MEM_AREA_NOTYPE;

	/*
	 * Assign region sizes, note that MEM_AREA_TEE_RAM always uses
	 * SMALL_PAGE_SIZE if paging is enabled.
	 */
	for (map = memory_map; map->type != MEM_AREA_NOTYPE; map++) {
		paddr_t mask = map->pa | map->size;

		if (!(mask & CORE_MMU_PGDIR_MASK))
			map->region_size = CORE_MMU_PGDIR_SIZE;
		else if (!(mask & SMALL_PAGE_MASK))
			map->region_size = SMALL_PAGE_SIZE;
		else
			panic("Impossible memory alignment");
	}

	/*
	 * memory_map is sorted in order first by type and last by address.
	 * This puts TEE_RAM first and TA_RAM second.
	 */
	map = memory_map;
	assert(map->type == MEM_AREA_TEE_RAM);
	map->va = map->pa;
#ifdef CFG_WITH_PAGER
	map->region_size = SMALL_PAGE_SIZE;
#endif
	map->attr = core_mmu_type_to_attr(map->type);

	if (core_mmu_place_tee_ram_at_top(map->pa)) {
		va = map->va;
		map++;
		while (map->type != MEM_AREA_NOTYPE) {
			map->attr = core_mmu_type_to_attr(map->type);
			va -= map->size;
			map->va = va;
			map++;
		}
		/*
		 * The memory map should be sorted by virtual address
		 * when this function returns. As we're assigning va in
		 * the opposite direction we need to reverse the list.
		 */
		for (n = 0; n < last / 2; n++) {
			struct tee_mmap_region r;

			r = memory_map[last - n - 1];
			memory_map[last - n - 1] = memory_map[n];
			memory_map[n] = r;
		}
	} else {
		va = ROUNDUP(map->va + map->size, CORE_MMU_PGDIR_SIZE);
		map++;
		while (map->type != MEM_AREA_NOTYPE) {
			map->attr = core_mmu_type_to_attr(map->type);
			map->va = va;
			va += map->size;
			map++;
		}
	}

	for (map = memory_map; map->type != MEM_AREA_NOTYPE; map++) {
		vaddr_t __maybe_unused vstart;

		vstart = map->va + ((vaddr_t)map->pa & (map->region_size - 1));
		DMSG("type va %d 0x%08" PRIxVA "..0x%08" PRIxVA
		     " pa 0x%08" PRIxPA "..0x%08" PRIxPA " size %#zx",
		     map->type, vstart, vstart + map->size - 1,
		     (paddr_t)map->pa, (paddr_t)map->pa + map->size - 1,
		     map->size);
	}
}

/*
 * core_init_mmu_map - init tee core default memory mapping
 *
 * This routine sets the static default tee core mapping.
 *
 * If an error happens, core_init_mmu_map is expected to reset.
 */
void core_init_mmu_map(void)
{
	struct tee_mmap_region *map;
	size_t n;

	for (n = 0; n < ARRAY_SIZE(secure_only); n++) {
		if (pbuf_intersects(nsec_shared, secure_only[n].paddr,
				    secure_only[n].size))
			panic("Invalid memory access config: sec/nsec");
	}

	init_mem_map(static_memory_map, ARRAY_SIZE(static_memory_map));

	map = static_memory_map;
	while (map->type != MEM_AREA_NOTYPE) {
		switch (map->type) {
		case MEM_AREA_TEE_RAM:
			if (!pbuf_is_inside(secure_only, map->pa, map->size))
				panic("TEE_RAM can't fit in secure_only");
			map_tee_ram = map;
			break;
		case MEM_AREA_TA_RAM:
			if (!pbuf_is_inside(secure_only, map->pa, map->size))
				panic("TA_RAM can't fit in secure_only");
			map_ta_ram = map;
			break;
		case MEM_AREA_NSEC_SHM:
			if (!pbuf_is_inside(nsec_shared, map->pa, map->size))
				panic("NS_SHM can't fit in nsec_shared");
			map_nsec_shm = map;
			break;
		case MEM_AREA_IO_SEC:
		case MEM_AREA_IO_NSEC:
		case MEM_AREA_RAM_SEC:
		case MEM_AREA_RAM_NSEC:
		case MEM_AREA_RES_VASPACE:
			break;
		default:
			EMSG("Unhandled memtype %d", map->type);
			panic();
		}
		map++;
	}

	/* Check that we have the mandatory memory areas defined */
	if (!map_tee_ram || !map_ta_ram || !map_nsec_shm)
		panic("mandatory area(s) not found");

	core_init_mmu_tables(static_memory_map);
}

/* routines to retrieve shared mem configuration */
bool core_mmu_is_shm_cached(void)
{
	if (!map_nsec_shm)
		return false;
	return map_nsec_shm->attr >> TEE_MATTR_CACHE_SHIFT ==
	       TEE_MATTR_CACHE_CACHED;
}

bool core_mmu_mattr_is_ok(uint32_t mattr)
{
	/*
	 * Keep in sync with core_mmu_lpae.c:mattr_to_desc and
	 * core_mmu_v7.c:mattr_to_texcb
	 */

	switch ((mattr >> TEE_MATTR_CACHE_SHIFT) & TEE_MATTR_CACHE_MASK) {
	case TEE_MATTR_CACHE_NONCACHE:
	case TEE_MATTR_CACHE_CACHED:
		return true;
	default:
		return false;
	}
}

/*
 * test attributes of target physical buffer
 *
 * Flags: pbuf_is(SECURE, NOT_SECURE, RAM, IOMEM, KEYVAULT).
 *
 */
bool core_pbuf_is(uint32_t attr, paddr_t pbuf, size_t len)
{
	struct tee_mmap_region *map;

	/* Empty buffers comply with anything */
	if (len == 0)
		return true;

	switch (attr) {
	case CORE_MEM_SEC:
		return pbuf_is_inside(secure_only, pbuf, len);
	case CORE_MEM_NON_SEC:
		return pbuf_is_inside(nsec_shared, pbuf, len);
	case CORE_MEM_TEE_RAM:
		return pbuf_inside_map_area(pbuf, len, map_tee_ram);
	case CORE_MEM_TA_RAM:
		return pbuf_inside_map_area(pbuf, len, map_ta_ram);
	case CORE_MEM_NSEC_SHM:
		return pbuf_inside_map_area(pbuf, len, map_nsec_shm);
	case CORE_MEM_EXTRAM:
		return pbuf_is_inside(ddr, pbuf, len);
	case CORE_MEM_CACHED:
		map = find_map_by_pa(pbuf);
		if (map == NULL || !pbuf_inside_map_area(pbuf, len, map))
			return false;
		return map->attr >> TEE_MATTR_CACHE_SHIFT ==
		       TEE_MATTR_CACHE_CACHED;
	default:
		return false;
	}
}

/* test attributes of target virtual buffer (in core mapping) */
bool core_vbuf_is(uint32_t attr, const void *vbuf, size_t len)
{
	paddr_t p;

	/* Empty buffers comply with anything */
	if (len == 0)
		return true;

	p = virt_to_phys((void *)vbuf);
	if (!p)
		return false;

	return core_pbuf_is(attr, p, len);
}

/* core_va2pa - teecore exported service */
int core_va2pa_helper(void *va, paddr_t *pa)
{
	struct tee_mmap_region *map;

	map = find_map_by_va(va);
	if (!va_is_in_map(map, (vaddr_t)va))
		return -1;

	*pa = ((uintptr_t)va & (map->region_size - 1)) |
	    ((map->pa + (uintptr_t)va - map->va) & ~(map->region_size - 1));
	return 0;
}

static void *map_pa2va(struct tee_mmap_region *map, paddr_t pa)
{
	if (!pa_is_in_map(map, pa))
		return NULL;
	return (void *)((pa & (map->region_size - 1)) |
		((map->va + pa - map->pa) & ~((vaddr_t)map->region_size - 1)));
}

/*
 * teecore gets some memory area definitions
 */
void core_mmu_get_mem_by_type(unsigned int type, vaddr_t *s, vaddr_t *e)
{
	struct tee_mmap_region *map = find_map_by_type(type);

	if (map) {
		*s = map->va;
		*e = map->va + map->size;
	} else {
		*s = 0;
		*e = 0;
	}
}

enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa)
{
	struct tee_mmap_region *map = find_map_by_pa(pa);

	if (!map)
		return MEM_AREA_NOTYPE;
	return map->type;
}

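/*
 * Perform the requested TLB invalidation. The 'a' argument is the ASID or
 * MVA for the by-ASID/by-MVA operations and is ignored otherwise.
 */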
int core_tlb_maintenance(int op, unsigned int a)
{
	/*
	 * We're doing TLB invalidation because we've changed mapping.
	 * The dsb() makes sure that written data is visible.
	 */
	dsb();

	switch (op) {
	case TLBINV_UNIFIEDTLB:
		secure_mmu_unifiedtlbinvall();
		break;
	case TLBINV_CURRENT_ASID:
		secure_mmu_unifiedtlbinv_curasid();
		break;
	case TLBINV_BY_ASID:
		secure_mmu_unifiedtlbinv_byasid(a);
		break;
	case TLBINV_BY_MVA:
		EMSG("TLB_INV_SECURE_MVA is not yet supported!");
		panic();
		secure_mmu_unifiedtlbinvbymva(a);
		break;
	default:
		return 1;
	}
	return 0;
}

unsigned int cache_maintenance_l1(int op, void *va, size_t len)
{
	switch (op) {
	case DCACHE_CLEAN:
		arm_cl1_d_cleanbysetway();
		break;
	case DCACHE_AREA_CLEAN:
		if (len)
			arm_cl1_d_cleanbyva(va, (char *)va + len - 1);
		break;
	case DCACHE_INVALIDATE:
		arm_cl1_d_invbysetway();
		break;
	case DCACHE_AREA_INVALIDATE:
		if (len)
			arm_cl1_d_invbyva(va, (char *)va + len - 1);
		break;
	case ICACHE_INVALIDATE:
		arm_cl1_i_inv_all();
		break;
	case ICACHE_AREA_INVALIDATE:
		if (len)
			arm_cl1_i_inv(va, (char *)va + len - 1);
		break;
	case WRITE_BUFFER_DRAIN:
		DMSG("unsupported operation 0x%X (WRITE_BUFFER_DRAIN)",
		     (unsigned int)op);
		return TEE_ERROR_NOT_SUPPORTED;
	case DCACHE_CLEAN_INV:
		arm_cl1_d_cleaninvbysetway();
		break;
	case DCACHE_AREA_CLEAN_INV:
		if (len)
			arm_cl1_d_cleaninvbyva(va, (char *)va + len - 1);
		break;
	default:
		return TEE_ERROR_NOT_IMPLEMENTED;
	}
	return TEE_SUCCESS;
}

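/*
 * Outer cache (PL310) maintenance by physical address. The l2cc mutex
 * serializes accesses to the controller and IRQs are masked while it is
 * held.
 */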
unsigned int cache_maintenance_l2(int op, paddr_t pa, size_t len)
{
	unsigned int ret = TEE_SUCCESS;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);

	tee_l2cc_mutex_lock();
	switch (op) {
	case L2CACHE_INVALIDATE:
		arm_cl2_invbyway(pl310_base());
		break;
	case L2CACHE_AREA_INVALIDATE:
		if (len)
			arm_cl2_invbypa(pl310_base(), pa, pa + len - 1);
		break;
	case L2CACHE_CLEAN:
		arm_cl2_cleanbyway(pl310_base());
		break;
	case L2CACHE_AREA_CLEAN:
		if (len)
			arm_cl2_cleanbypa(pl310_base(), pa, pa + len - 1);
		break;
	case L2CACHE_CLEAN_INV:
		arm_cl2_cleaninvbyway(pl310_base());
		break;
	case L2CACHE_AREA_CLEAN_INV:
		if (len)
			arm_cl2_cleaninvbypa(pl310_base(), pa, pa + len - 1);
		break;
	default:
		ret = TEE_ERROR_NOT_IMPLEMENTED;
	}

	tee_l2cc_mutex_unlock();
	thread_set_exceptions(exceptions);
	return ret;
}

void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
			paddr_t pa, uint32_t attr)
{
	assert(idx < tbl_info->num_entries);
	core_mmu_set_entry_primitive(tbl_info->table, tbl_info->level,
				     idx, pa, attr);
}

void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
			paddr_t *pa, uint32_t *attr)
{
	assert(idx < tbl_info->num_entries);
	core_mmu_get_entry_primitive(tbl_info->table, tbl_info->level,
				     idx, pa, attr);
}

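/*
 * Map a region by filling consecutive entries of a translation table.
 * va, size and pa must all be aligned to the block size of the table.
 */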
static void set_region(struct core_mmu_table_info *tbl_info,
		       struct tee_mmap_region *region)
{
	unsigned end;
	unsigned idx;
	paddr_t pa;

	/* va, len and pa should be block aligned */
	assert(!core_mmu_get_block_offset(tbl_info, region->va));
	assert(!core_mmu_get_block_offset(tbl_info, region->size));
	assert(!core_mmu_get_block_offset(tbl_info, region->pa));

	idx = core_mmu_va2idx(tbl_info, region->va);
	end = core_mmu_va2idx(tbl_info, region->va + region->size);
	pa = region->pa;

	while (idx < end) {
		core_mmu_set_entry(tbl_info, idx, pa, region->attr);
		idx++;
		pa += 1 << tbl_info->shift;
	}
}

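/*
 * With CFG_SMALL_PAGE_USER_TA the user TA address space is mapped with
 * small pages. set_pg_region() walks a region one CORE_MMU_PGDIR_SIZE
 * chunk at a time: when a chunk boundary is crossed the next pre-allocated
 * page table is taken from the pgt list and hooked into the directory;
 * unpaged mobjs are mapped directly while paged ones are left to the pager.
 */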
#ifdef CFG_SMALL_PAGE_USER_TA
static void set_pg_region(struct core_mmu_table_info *dir_info,
			  struct tee_ta_region *region, struct pgt **pgt,
			  struct core_mmu_table_info *pg_info)
{
	struct tee_mmap_region r = {
		.va = region->va,
		.size = region->size,
		.attr = region->attr,
	};
	vaddr_t end = r.va + r.size;
	uint32_t pgt_attr = (r.attr & TEE_MATTR_SECURE) | TEE_MATTR_TABLE;

	while (r.va < end) {
		if (!pg_info->table ||
		    r.va >= (pg_info->va_base + CORE_MMU_PGDIR_SIZE)) {
			/*
			 * We're assigning a new translation table.
			 */
			unsigned int idx;

			assert(*pgt); /* We should have allocated enough */

			/* Virtual addresses must grow */
			assert(r.va > pg_info->va_base);

			idx = core_mmu_va2idx(dir_info, r.va);
			pg_info->table = (*pgt)->tbl;
			pg_info->va_base = core_mmu_idx2va(dir_info, idx);
#ifdef CFG_PAGED_USER_TA
			assert((*pgt)->vabase == pg_info->va_base);
#endif
			*pgt = SLIST_NEXT(*pgt, link);

			core_mmu_set_entry(dir_info, idx,
					   virt_to_phys(pg_info->table),
					   pgt_attr);
		}

		r.size = MIN(CORE_MMU_PGDIR_SIZE - (r.va - pg_info->va_base),
			     end - r.va);
		if (!mobj_is_paged(region->mobj)) {
			size_t granule = BIT(pg_info->shift);
			size_t offset = r.va - region->va + region->offset;

			if (mobj_get_pa(region->mobj, offset, granule,
					&r.pa) != TEE_SUCCESS)
				panic("Failed to get PA of unpaged mobj");
			set_region(pg_info, &r);
		}
		r.va += r.size;
	}
}

void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
				struct user_ta_ctx *utc)
{
	struct core_mmu_table_info pg_info;
	struct pgt_cache *pgt_cache = &thread_get_tsd()->pgt_cache;
	struct pgt *pgt;
	size_t n;

	/* Find the last valid entry */
	n = ARRAY_SIZE(utc->mmu->regions);
	while (true) {
		n--;
		if (utc->mmu->regions[n].size)
			break;
		if (!n)
			return;	/* Nothing to map */
	}

	/*
	 * Allocate all page tables in advance.
	 */
	pgt_alloc(pgt_cache, &utc->ctx, utc->mmu->regions[0].va,
		  utc->mmu->regions[n].va + utc->mmu->regions[n].size - 1);
	pgt = SLIST_FIRST(pgt_cache);

	core_mmu_set_info_table(&pg_info, dir_info->level + 1, 0, NULL);

	for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++)
		mobj_update_mapping(utc->mmu->regions[n].mobj, utc,
				    utc->mmu->regions[n].va);

	for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
		if (!utc->mmu->regions[n].size)
			continue;
		set_pg_region(dir_info, utc->mmu->regions + n, &pgt, &pg_info);
	}
}
#else
void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
				struct user_ta_ctx *utc)
{
	unsigned n;
	struct tee_mmap_region r;
	size_t offset;
	size_t granule = BIT(dir_info->shift);

	memset(&r, 0, sizeof(r));
	for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
		if (!utc->mmu->regions[n].size)
			continue;

		offset = utc->mmu->regions[n].offset;
		r.va = utc->mmu->regions[n].va;
		r.size = utc->mmu->regions[n].size;
		r.attr = utc->mmu->regions[n].attr;

		if (mobj_get_pa(utc->mmu->regions[n].mobj, offset, granule,
				&r.pa) != TEE_SUCCESS)
			panic("Failed to get PA of unpaged mobj");

		set_region(dir_info, &r);
	}
}
#endif

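/*
 * Add a mapping of a physical range at run-time. Virtual address space is
 * carved out of the MEM_AREA_RES_VASPACE entry; returns true if the range
 * already is mapped or if the new mapping was added successfully.
 */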
bool core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len)
{
	struct core_mmu_table_info tbl_info;
	struct tee_mmap_region *map;
	size_t n;
	size_t granule;
	paddr_t p;
	size_t l;

	if (!len)
		return true;

	/* Check if the memory is already mapped */
	map = find_map_by_type_and_pa(type, addr);
	if (map && pbuf_inside_map_area(addr, len, map))
		return true;

	/* Find the reserved va space used for late mappings */
	map = find_map_by_type(MEM_AREA_RES_VASPACE);
	if (!map)
		return false;

	if (!core_mmu_find_table(map->va, UINT_MAX, &tbl_info))
		return false;

	granule = 1 << tbl_info.shift;
	p = ROUNDDOWN(addr, granule);
	l = ROUNDUP(len + addr - p, granule);
	/*
	 * Something is wrong, we can't fit the va range into the selected
	 * table. The reserved va range is possibly misaligned with the
	 * granule of the table.
	 */
	if (core_mmu_va2idx(&tbl_info, map->va + len) >= tbl_info.num_entries)
		return false;

	/* Find end of the memory map */
	n = 0;
	while (static_memory_map[n].type != MEM_AREA_NOTYPE)
		n++;

	if (n < (ARRAY_SIZE(static_memory_map) - 1)) {
		/* There's room for another entry */
		static_memory_map[n].va = map->va;
		static_memory_map[n].size = l;
		static_memory_map[n + 1].type = MEM_AREA_NOTYPE;
		map->va += l;
		map->size -= l;
		map = static_memory_map + n;
	} else {
		/*
		 * There isn't room for another entry, steal the reserved
		 * entry as it's not useful for anything else any longer.
		 */
		map->size = l;
	}
	map->type = type;
	map->pa = p;
	map->region_size = granule;
	map->attr = core_mmu_type_to_attr(type);

	set_region(&tbl_info, map);
	return true;
}

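/*
 * Translate a core virtual address using the hardware address translation
 * instruction (ATS1CPR on ARM32, AT S1E1R on ARM64) and extract the
 * physical address from PAR.
 */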
static bool arm_va2pa_helper(void *va, paddr_t *pa)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	paddr_t par = 0;
	paddr_t par_pa_mask = 0;
	bool ret = false;

	/* Assumed helper/bit names below: read_par64(), read_par32(), PAR_F */
#ifdef ARM32
	write_ats1cpr((vaddr_t)va);
	isb();
#ifdef CFG_WITH_LPAE
	par = read_par64();
	par_pa_mask = PAR64_PA_MASK;
#else
	par = read_par32();
	par_pa_mask = PAR32_PA_MASK;
#endif
#endif /*ARM32*/

#ifdef ARM64
	write_at_s1e1r((vaddr_t)va);
	isb();
	par = read_par_el1();
	par_pa_mask = PAR_PA_MASK;
#endif /*ARM64*/

	if (par & PAR_F)
		goto out;
	*pa = (par & (par_pa_mask << PAR_PA_SHIFT)) |
		((vaddr_t)va & ((1 << PAR_PA_SHIFT) - 1));

	ret = true;
out:
	thread_unmask_exceptions(exceptions);
	return ret;
}

#ifdef CFG_WITH_PAGER
static vaddr_t get_linear_map_end(void)
{
	/* this is synced with the generic linker file kern.ld.S */
	return (vaddr_t)__heap2_end;
}
#endif

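/*
 * Debug-only consistency check: verify that a VA->PA translation obtained
 * from the hardware matches what the mapping tables say.
 */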
#if defined(CFG_TEE_CORE_DEBUG)
static void check_pa_matches_va(void *va, paddr_t pa)
{
	TEE_Result res;
	vaddr_t v = (vaddr_t)va;
	paddr_t p = 0;

	if (core_mmu_user_va_range_is_defined()) {
		vaddr_t user_va_base;
		size_t user_va_size;

		core_mmu_get_user_va_range(&user_va_base, &user_va_size);
		if (v >= user_va_base &&
		    v <= (user_va_base - 1 + user_va_size)) {
			if (!core_mmu_user_mapping_is_active()) {
				if (pa)
					panic("issue in linear address space");
				return;
			}

			res = tee_mmu_user_va2pa_helper(
				to_user_ta_ctx(tee_mmu_get_ctx()), va, &p);
			if (res == TEE_SUCCESS && pa != p)
				panic();
			if (res != TEE_SUCCESS && pa)
				panic();
			return;
		}
	}
#ifdef CFG_WITH_PAGER
	if (v >= CFG_TEE_LOAD_ADDR && v < get_linear_map_end()) {
		if (v != pa)
			panic("issue in linear address space");
		return;
	}

	if (v >= (CFG_TEE_LOAD_ADDR & ~CORE_MMU_PGDIR_MASK) &&
	    v <= (CFG_TEE_LOAD_ADDR | CORE_MMU_PGDIR_MASK)) {
		struct core_mmu_table_info *ti = &tee_pager_tbl_info;
		uint32_t a;

		/*
		 * Lookups in the page table managed by the pager are
		 * dangerous for addresses in the paged area as those pages
		 * change all the time. But some ranges are safe, for
		 * instance rw-locked areas while the page is populated.
		 */
		core_mmu_get_entry(ti, core_mmu_va2idx(ti, v), &p, &a);
		if (a & TEE_MATTR_VALID_BLOCK) {
			paddr_t mask = ((1 << ti->shift) - 1);

			p |= v & mask;
			if (pa != p)
				panic();
		} else {
			if (pa)
				panic();
		}
		return;
	}
#endif

	if (!core_va2pa_helper(va, &p)) {
		if (pa != p)
			panic();
	} else {
		if (pa)
			panic();
	}
}
#else
static void check_pa_matches_va(void *va __unused, paddr_t pa __unused)
{
}
#endif

paddr_t virt_to_phys(void *va)
{
	paddr_t pa;

	if (!arm_va2pa_helper(va, &pa))
		pa = 0;
	check_pa_matches_va(va, pa);
	return pa;
}

#if defined(CFG_TEE_CORE_DEBUG)
static void check_va_matches_pa(paddr_t pa, void *va)
{
	if (va && virt_to_phys(va) != pa)
		panic();
}
#else
static void check_va_matches_pa(paddr_t pa __unused, void *va __unused)
{
}
#endif

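/* PA->VA lookup within the currently active user TA mapping, if any. */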
static void *phys_to_virt_ta_vaspace(paddr_t pa)
{
	TEE_Result res;
	void *va = NULL;

	if (!core_mmu_user_mapping_is_active())
		return NULL;

	res = tee_mmu_user_pa2va_helper(to_user_ta_ctx(tee_mmu_get_ctx()),
					pa, &va);
	if (res != TEE_SUCCESS)
		return NULL;
	return va;
}

#ifdef CFG_WITH_PAGER
static void *phys_to_virt_tee_ram(paddr_t pa)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	unsigned idx;
	unsigned end_idx;
	uint32_t a;
	paddr_t p;

	if (pa >= CFG_TEE_LOAD_ADDR && pa < get_linear_map_end())
		return (void *)(vaddr_t)pa;

	end_idx = core_mmu_va2idx(ti, CFG_TEE_RAM_START +
				  CFG_TEE_RAM_VA_SIZE);
	/* Most addresses are mapped linearly, try that first if possible. */
	idx = core_mmu_va2idx(ti, pa);
	if (idx >= core_mmu_va2idx(ti, CFG_TEE_RAM_START) &&
	    idx < end_idx) {
		core_mmu_get_entry(ti, idx, &p, &a);
		if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
			return (void *)core_mmu_idx2va(ti, idx);
	}

	for (idx = core_mmu_va2idx(ti, CFG_TEE_RAM_START);
	     idx < end_idx; idx++) {
		core_mmu_get_entry(ti, idx, &p, &a);
		if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
			return (void *)core_mmu_idx2va(ti, idx);
	}

	return NULL;
}
#else
static void *phys_to_virt_tee_ram(paddr_t pa)
{
	return map_pa2va(find_map_by_type_and_pa(MEM_AREA_TEE_RAM, pa), pa);
}
#endif

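/*
 * Generic PA->VA lookup, selecting the strategy based on the memory type
 * the caller expects the address to belong to.
 */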
void *phys_to_virt(paddr_t pa, enum teecore_memtypes m)
{
	void *va;

	switch (m) {
	case MEM_AREA_TA_VASPACE:
		va = phys_to_virt_ta_vaspace(pa);
		break;
	case MEM_AREA_TEE_RAM:
		va = phys_to_virt_tee_ram(pa);
		break;
	default:
		va = map_pa2va(find_map_by_type_and_pa(m, pa), pa);
	}
	check_va_matches_pa(pa, va);
	return va;
}

bool cpu_mmu_enabled(void)
{
	uint32_t sctlr;

#ifdef ARM32
	sctlr = read_sctlr();
#else
	sctlr = read_sctlr_el1();
#endif

	return sctlr & SCTLR_M ? true : false;
}