/*
 * Copyright (c) 2015-2016, Linaro Limited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <platform_config.h>

#include <arm.h>
#include <assert.h>
#include <kernel/thread.h>
#include <kernel/panic.h>
#include <kernel/misc.h>
#include <mm/core_memprot.h>
#include <mm/pgt_cache.h>
#include <mm/core_mmu.h>
#include <string.h>
#include <trace.h>
#include <types_ext.h>
#include <util.h>

#include "core_mmu_private.h"
#ifndef DEBUG_XLAT_TABLE
#define DEBUG_XLAT_TABLE 0
#endif

#if DEBUG_XLAT_TABLE
#define debug_print(...) DMSG_RAW(__VA_ARGS__)
#else
#define debug_print(...) ((void)0)
#endif
/*
 * Miscellaneous MMU related constants
 */

#define INVALID_DESC		0x0
#define BLOCK_DESC		0x1
#define L3_BLOCK_DESC		0x3
#define TABLE_DESC		0x3
#define DESC_ENTRY_TYPE_MASK	0x3

#define HIDDEN_DESC		0x4
#define HIDDEN_DIRTY_DESC	0x8

#define XN			(1ull << 2)
#define PXN			(1ull << 1)
#define CONT_HINT		(1ull << 0)

#define UPPER_ATTRS(x)		(((x) & 0x7) << 52)
#define NON_GLOBAL		(1ull << 9)
#define ACCESS_FLAG		(1ull << 8)
#define NSH			(0x0 << 6)
#define OSH			(0x2 << 6)
#define ISH			(0x3 << 6)

#define AP_RO			(0x1 << 5)
#define AP_RW			(0x0 << 5)
#define AP_UNPRIV		(0x1 << 4)

#define NS				(0x1 << 3)
#define LOWER_ATTRS_SHIFT		2
#define LOWER_ATTRS(x)			(((x) & 0xfff) << LOWER_ATTRS_SHIFT)

#define ATTR_DEVICE_INDEX		0x0
#define ATTR_IWBWA_OWBWA_NTR_INDEX	0x1
#define ATTR_INDEX_MASK			0x7

#define ATTR_DEVICE			(0x4)
#define ATTR_IWBWA_OWBWA_NTR		(0xff)

#define MAIR_ATTR_SET(attr, index)	(((uint64_t)attr) << ((index) << 3))
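
/*
 * Worked example (for illustration, assuming only the two attribute indices
 * above are used): the MAIR value programmed in core_init_mmu_regs() is
 *   MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX)                   = 0x04
 *   MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX) = 0xff00
 * giving mair == 0xff04: Device memory for attribute index 0 and inner/outer
 * write-back write-allocate normal memory for attribute index 1.
 */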
#define OUTPUT_ADDRESS_MASK	(0x0000FFFFFFFFF000ULL)

/* (internal) physical address size bits in EL3/EL1 */
#define TCR_PS_BITS_4GB		(0x0)
#define TCR_PS_BITS_64GB	(0x1)
#define TCR_PS_BITS_1TB		(0x2)
#define TCR_PS_BITS_4TB		(0x3)
#define TCR_PS_BITS_16TB	(0x4)
#define TCR_PS_BITS_256TB	(0x5)

#define ADDR_MASK_48_TO_63	0xFFFF000000000000ULL
#define ADDR_MASK_44_TO_47	0x0000F00000000000ULL
#define ADDR_MASK_42_TO_43	0x00000C0000000000ULL
#define ADDR_MASK_40_TO_41	0x0000030000000000ULL
#define ADDR_MASK_36_TO_39	0x000000F000000000ULL
#define ADDR_MASK_32_TO_35	0x0000000F00000000ULL

#define UNSET_DESC		((uint64_t)-1)
#define FOUR_KB_SHIFT		12
#define PAGE_SIZE_SHIFT		FOUR_KB_SHIFT
#define PAGE_SIZE		(1 << PAGE_SIZE_SHIFT)
#define PAGE_SIZE_MASK		(PAGE_SIZE - 1)
#define IS_PAGE_ALIGNED(addr)	(((addr) & PAGE_SIZE_MASK) == 0)

#define XLAT_ENTRY_SIZE_SHIFT	3 /* Each MMU table entry is 8 bytes (1 << 3) */
#define XLAT_ENTRY_SIZE		(1 << XLAT_ENTRY_SIZE_SHIFT)

#define XLAT_TABLE_SIZE_SHIFT	PAGE_SIZE_SHIFT
#define XLAT_TABLE_SIZE		(1 << XLAT_TABLE_SIZE_SHIFT)

/* Values for number of entries in each MMU translation table */
#define XLAT_TABLE_ENTRIES_SHIFT (XLAT_TABLE_SIZE_SHIFT - XLAT_ENTRY_SIZE_SHIFT)
#define XLAT_TABLE_ENTRIES	(1 << XLAT_TABLE_ENTRIES_SHIFT)
#define XLAT_TABLE_ENTRIES_MASK	(XLAT_TABLE_ENTRIES - 1)

/* Values to convert a memory address to an index into a translation table */
#define L3_XLAT_ADDRESS_SHIFT	PAGE_SIZE_SHIFT
#define L2_XLAT_ADDRESS_SHIFT	(L3_XLAT_ADDRESS_SHIFT + \
				 XLAT_TABLE_ENTRIES_SHIFT)
#define L1_XLAT_ADDRESS_SHIFT	(L2_XLAT_ADDRESS_SHIFT + \
				 XLAT_TABLE_ENTRIES_SHIFT)
#define MAX_MMAP_REGIONS	16
#define NUM_L1_ENTRIES		\
	(CFG_LPAE_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
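
/*
 * Illustration, assuming the 4 KiB granule configured above: the shifts are
 * L3 = 12, L2 = 21 and L1 = 30, so a VA is decoded as va[38:30] -> L1 index,
 * va[29:21] -> L2 index, va[20:12] -> L3 index and va[11:0] -> page offset.
 * Each table holds XLAT_TABLE_ENTRIES = 512 eight-byte entries, and with
 * CFG_LPAE_ADDR_SPACE_SIZE = 4 GiB, NUM_L1_ENTRIES evaluates to 4.
 */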
#ifndef MAX_XLAT_TABLES
#define MAX_XLAT_TABLES		5
#endif
/* MMU L1 table, one for each core */
static uint64_t l1_xlation_table[CFG_TEE_CORE_NB_CORE][NUM_L1_ENTRIES]
	__aligned(NUM_L1_ENTRIES * XLAT_ENTRY_SIZE) __section(".nozi.mmu.l1");

static uint64_t xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
	__aligned(XLAT_TABLE_SIZE) __section(".nozi.mmu.l2");

/* MMU L2 table for TAs, one for each thread */
static uint64_t xlat_tables_ul1[CFG_NUM_THREADS][XLAT_TABLE_ENTRIES]
	__aligned(XLAT_TABLE_SIZE) __section(".nozi.mmu.l2");

static unsigned next_xlat __early_bss;
static uint64_t tcr_ps_bits __early_bss;
static int user_va_idx = -1;
static uint32_t desc_to_mattr(unsigned level, uint64_t desc)
{
	uint32_t a;

	if (!(desc & 1)) {
		if (desc & HIDDEN_DESC)
			return TEE_MATTR_HIDDEN_BLOCK;
		if (desc & HIDDEN_DIRTY_DESC)
			return TEE_MATTR_HIDDEN_DIRTY_BLOCK;
		return 0;
	}

	if (level == 3) {
		if ((desc & DESC_ENTRY_TYPE_MASK) != L3_BLOCK_DESC)
			return 0;
	} else {
		if ((desc & DESC_ENTRY_TYPE_MASK) == TABLE_DESC)
			return TEE_MATTR_TABLE;
	}

	a = TEE_MATTR_VALID_BLOCK;

	if (desc & LOWER_ATTRS(ACCESS_FLAG))
		a |= TEE_MATTR_PRX | TEE_MATTR_URX;

	if (!(desc & LOWER_ATTRS(AP_RO)))
		a |= TEE_MATTR_PW | TEE_MATTR_UW;

	if (!(desc & LOWER_ATTRS(AP_UNPRIV)))
		a &= ~TEE_MATTR_URWX;

	if (desc & UPPER_ATTRS(XN))
		a &= ~(TEE_MATTR_PX | TEE_MATTR_UX);

	if (desc & UPPER_ATTRS(PXN))
		a &= ~TEE_MATTR_PX;

	COMPILE_TIME_ASSERT(ATTR_DEVICE_INDEX == TEE_MATTR_CACHE_NONCACHE);
	COMPILE_TIME_ASSERT(ATTR_IWBWA_OWBWA_NTR_INDEX ==
			    TEE_MATTR_CACHE_CACHED);

	a |= ((desc & LOWER_ATTRS(ATTR_INDEX_MASK)) >> LOWER_ATTRS_SHIFT) <<
	     TEE_MATTR_CACHE_SHIFT;

	if (!(desc & LOWER_ATTRS(NON_GLOBAL)))
		a |= TEE_MATTR_GLOBAL;

	if (!(desc & LOWER_ATTRS(NS)))
		a |= TEE_MATTR_SECURE;

	return a;
}
static uint64_t mattr_to_desc(unsigned level, uint32_t attr)
{
	uint64_t desc;
	uint32_t a = attr;

	if (a & TEE_MATTR_HIDDEN_BLOCK)
		return INVALID_DESC | HIDDEN_DESC;

	if (a & TEE_MATTR_HIDDEN_DIRTY_BLOCK)
		return INVALID_DESC | HIDDEN_DIRTY_DESC;

	if (a & TEE_MATTR_TABLE)
		return TABLE_DESC;

	if (!(a & TEE_MATTR_VALID_BLOCK))
		return 0;

	if (a & (TEE_MATTR_PX | TEE_MATTR_PW))
		a |= TEE_MATTR_PR;
	if (a & (TEE_MATTR_UX | TEE_MATTR_UW))
		a |= TEE_MATTR_UR;
	if (a & TEE_MATTR_UR)
		a |= TEE_MATTR_PR;
	if (a & TEE_MATTR_UW)
		a |= TEE_MATTR_PW;

	if (level == 3)
		desc = L3_BLOCK_DESC;
	else
		desc = BLOCK_DESC;

	if (!(a & (TEE_MATTR_PX | TEE_MATTR_UX)))
		desc |= UPPER_ATTRS(XN);
	if (!(a & TEE_MATTR_PX))
		desc |= UPPER_ATTRS(PXN);

	if (a & TEE_MATTR_UR)
		desc |= LOWER_ATTRS(AP_UNPRIV);

	if (!(a & TEE_MATTR_PW))
		desc |= LOWER_ATTRS(AP_RO);

	/* Keep in sync with core_mmu.c:core_mmu_mattr_is_ok */
	switch ((a >> TEE_MATTR_CACHE_SHIFT) & TEE_MATTR_CACHE_MASK) {
	case TEE_MATTR_CACHE_NONCACHE:
		desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
		break;
	case TEE_MATTR_CACHE_CACHED:
		desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
		break;
	default:
		/*
		 * "Can't happen" the attribute is supposed to be checked
		 * with core_mmu_mattr_is_ok() before.
		 */
		panic();
	}

	if (a & (TEE_MATTR_UR | TEE_MATTR_PR))
		desc |= LOWER_ATTRS(ACCESS_FLAG);

	if (!(a & TEE_MATTR_GLOBAL))
		desc |= LOWER_ATTRS(NON_GLOBAL);

	desc |= a & TEE_MATTR_SECURE ? 0 : LOWER_ATTRS(NS);

	return desc;
}
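
/*
 * Worked example (illustrative): a level 3 mapping that is valid, secure,
 * global, cached, read/write and non-executable becomes
 *   L3_BLOCK_DESC | UPPER_ATTRS(XN) | UPPER_ATTRS(PXN) |
 *   LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH | ACCESS_FLAG)
 * i.e. 0x0060000000000707, before the output address is ORed in by
 * mmap_desc() below.
 */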
static uint64_t mmap_desc(uint32_t attr, uint64_t addr_pa,
			  unsigned level)
{
	return mattr_to_desc(level, attr) | addr_pa;
}
static int mmap_region_attr(struct tee_mmap_region *mm, uint64_t base_va,
			    uint64_t size)
{
	uint32_t attr = mm->attr;

	for (;;) {
		mm++;

		if (!mm->size)
			return attr; /* Reached end of list */

		if (mm->va >= base_va + size)
			return attr; /* Next region is after area so end */

		if (mm->va + mm->size <= base_va)
			continue; /* Next region has already been overtaken */

		if (mm->attr == attr)
			continue; /* Region doesn't override attribs so skip */

		if (mm->va > base_va ||
		    mm->va + mm->size < base_va + size)
			return -1; /* Region doesn't fully cover our area */
	}
}
static struct tee_mmap_region *init_xlation_table(struct tee_mmap_region *mm,
			uint64_t base_va, uint64_t *table, unsigned level)
{
	unsigned int level_size_shift = L1_XLAT_ADDRESS_SHIFT - (level - 1) *
					XLAT_TABLE_ENTRIES_SHIFT;
	unsigned int level_size = BIT32(level_size_shift);
	uint64_t level_index_mask = SHIFT_U64(XLAT_TABLE_ENTRIES_MASK,
					      level_size_shift);

	assert(level <= 3);

	debug_print("New xlat table (level %u):", level);

	do {
		uint64_t desc = UNSET_DESC;

		if (mm->va + mm->size <= base_va) {
			/* Area now after the region so skip it */
			mm++;
			continue;
		}

		if (mm->va >= base_va + level_size) {
			/* Next region is after area so nothing to map yet */
			desc = INVALID_DESC;
			debug_print("%*s%010" PRIx64 " %8x",
				    level * 2, "", base_va, level_size);
		} else if (mm->va <= base_va &&
			   mm->va + mm->size >= base_va + level_size &&
			   !(mm->pa & (level_size - 1))) {
			/* Next region covers all of area */
			int attr = mmap_region_attr(mm, base_va, level_size);

			if (attr >= 0) {
				desc = mmap_desc(attr,
						 base_va - mm->va + mm->pa,
						 level);
				debug_print("%*s%010" PRIx64 " %8x %s-%s-%s-%s",
					    level * 2, "", base_va, level_size,
					    attr & (TEE_MATTR_CACHE_CACHED <<
						    TEE_MATTR_CACHE_SHIFT) ?
						    "MEM" : "DEV",
					    attr & TEE_MATTR_PW ? "RW" : "RO",
					    attr & TEE_MATTR_PX ? "X" : "XN",
					    attr & TEE_MATTR_SECURE ? "S" : "NS");
			} else {
				debug_print("%*s%010" PRIx64 " %8x",
					    level * 2, "", base_va, level_size);
			}
		}
		/* else Next region only partially covers area, so need */

		if (desc == UNSET_DESC) {
			/* Area not covered by a region so need finer table */
			uint64_t *new_table = xlat_tables[next_xlat++];

			if (next_xlat > MAX_XLAT_TABLES)
				panic("running out of xlat tables");
			/* Clear table before use */
			memset(new_table, 0, XLAT_TABLE_SIZE);

			desc = TABLE_DESC | virt_to_phys(new_table);

			/* Recurse to fill in new table */
			mm = init_xlation_table(mm, base_va, new_table,
						level + 1);
		}

		*table++ = desc;
		base_va += level_size;
	} while (mm->size && (base_va & level_index_mask));

	return mm;
}
static unsigned int calc_physical_addr_size_bits(uint64_t max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert(!(max_addr & ADDR_MASK_48_TO_63));

	/* 48 bits address */
	if (max_addr & ADDR_MASK_44_TO_47)
		return TCR_PS_BITS_256TB;

	/* 44 bits address */
	if (max_addr & ADDR_MASK_42_TO_43)
		return TCR_PS_BITS_16TB;

	/* 42 bits address */
	if (max_addr & ADDR_MASK_40_TO_41)
		return TCR_PS_BITS_4TB;

	/* 40 bits address */
	if (max_addr & ADDR_MASK_36_TO_39)
		return TCR_PS_BITS_1TB;

	/* 36 bits address */
	if (max_addr & ADDR_MASK_32_TO_35)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}
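
/*
 * Example (illustrative): a platform whose highest physical address fits in
 * 32 bits falls through all checks above and gets TCR_PS_BITS_4GB, while a
 * max_addr with bit 32 set (e.g. 0x100000000) matches ADDR_MASK_32_TO_35 and
 * gets TCR_PS_BITS_64GB.
 */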
void core_init_mmu_tables(struct tee_mmap_region *mm)
{
	paddr_t max_pa = 0;
	vaddr_t max_va = 0;
	size_t n;

	for (n = 0; mm[n].size; n++) {
		paddr_t pa_end;
		vaddr_t va_end;

		debug_print(" %010" PRIxVA " %010" PRIxPA " %10zx %x",
			    mm[n].va, mm[n].pa, mm[n].size, mm[n].attr);

		if (!IS_PAGE_ALIGNED(mm[n].pa) || !IS_PAGE_ALIGNED(mm[n].size))
			panic("unaligned region");

		pa_end = mm[n].pa + mm[n].size - 1;
		va_end = mm[n].va + mm[n].size - 1;
		if (pa_end > max_pa)
			max_pa = pa_end;
		if (va_end > max_va)
			max_va = va_end;
	}

	/* Clear table before use */
	memset(l1_xlation_table[0], 0, NUM_L1_ENTRIES * XLAT_ENTRY_SIZE);
	init_xlation_table(mm, 0, l1_xlation_table[0], 1);
	for (n = 1; n < CFG_TEE_CORE_NB_CORE; n++)
		memcpy(l1_xlation_table[n], l1_xlation_table[0],
		       XLAT_ENTRY_SIZE * NUM_L1_ENTRIES);

	for (n = 1; n < NUM_L1_ENTRIES; n++) {
		if (!l1_xlation_table[0][n]) {
			user_va_idx = n;
			break;
		}
	}
	assert(user_va_idx != -1);

	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
	COMPILE_TIME_ASSERT(CFG_LPAE_ADDR_SPACE_SIZE > 0);
	assert(max_va < CFG_LPAE_ADDR_SPACE_SIZE);
}
bool core_mmu_place_tee_ram_at_top(paddr_t paddr)
{
	size_t l1size = (1 << L1_XLAT_ADDRESS_SHIFT);
	paddr_t l1mask = l1size - 1;

	return (paddr & l1mask) > (l1size / 2);
}
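
/*
 * Illustration: with a 4 KiB granule l1size is 1 GiB, so a TEE RAM start
 * address such as 0x30000000 (768 MiB into its 1 GiB window) is in the upper
 * half and the function returns true, whereas 0x10000000 returns false.
 */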
#ifdef ARM32
void core_init_mmu_regs(void)
{
	uint32_t ttbcr = TTBCR_EAE;
	uint32_t mair;
	paddr_t ttbr0;

	ttbr0 = virt_to_phys(l1_xlation_table[get_core_pos()]);

	mair  = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
	write_mair0(mair);

	ttbcr |= TTBCR_XRGNX_WBWA << TTBCR_IRGN0_SHIFT;
	ttbcr |= TTBCR_XRGNX_WBWA << TTBCR_ORGN0_SHIFT;
	ttbcr |= TTBCR_SHX_ISH << TTBCR_SH0_SHIFT;

	/* Disable the use of TTBR1 */
	ttbcr |= TTBCR_EPD1;

	/* TTBCR.A1 = 0 => ASID is stored in TTBR0 */

	write_ttbcr(ttbcr);
	write_ttbr0_64bit(ttbr0);
	write_ttbr1_64bit(0);
}
#endif /*ARM32*/
#ifdef ARM64
void core_init_mmu_regs(void)
{
	uint64_t mair;
	uint64_t tcr = 0;
	paddr_t ttbr0;

	ttbr0 = virt_to_phys(l1_xlation_table[get_core_pos()]);

	mair  = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
	write_mair_el1(mair);

	tcr |= TCR_XRGNX_WBWA << TCR_IRGN0_SHIFT;
	tcr |= TCR_XRGNX_WBWA << TCR_ORGN0_SHIFT;
	tcr |= TCR_SHX_ISH << TCR_SH0_SHIFT;
	tcr |= tcr_ps_bits << TCR_EL1_IPS_SHIFT;
	tcr |= 64 - __builtin_ctzl(CFG_LPAE_ADDR_SPACE_SIZE);
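	/*
	 * Illustration: the line above sets TCR_EL1.T0SZ. Assuming
	 * CFG_LPAE_ADDR_SPACE_SIZE = 4 GiB (1 << 32) this yields
	 * 64 - 32 = 32, i.e. a 32-bit virtual address range through TTBR0.
	 */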
	/* Disable the use of TTBR1 */
	tcr |= TCR_EPD1;

	/*
	 * TCR.A1 = 0 => ASID is stored in TTBR0
	 * TCR.AS = 0 => Same ASID size as in Aarch32/ARMv7
	 */

	write_tcr_el1(tcr);
	write_ttbr0_el1(ttbr0);
	write_ttbr1_el1(0);
}
#endif /*ARM64*/
void core_mmu_set_info_table(struct core_mmu_table_info *tbl_info,
			     unsigned level, vaddr_t va_base, void *table)
{
	tbl_info->level = level;
	tbl_info->table = table;
	tbl_info->va_base = va_base;
	tbl_info->shift = L1_XLAT_ADDRESS_SHIFT -
			  (level - 1) * XLAT_TABLE_ENTRIES_SHIFT;

	if (level == 1)
		tbl_info->num_entries = NUM_L1_ENTRIES;
	else
		tbl_info->num_entries = XLAT_TABLE_ENTRIES;
}
void core_mmu_get_user_pgdir(struct core_mmu_table_info *pgd_info)
{
	vaddr_t va_range_base;
	void *tbl = xlat_tables_ul1[thread_get_id()];

	core_mmu_get_user_va_range(&va_range_base, NULL);
	core_mmu_set_info_table(pgd_info, 2, va_range_base, tbl);
}
void core_mmu_create_user_map(struct user_ta_ctx *utc,
			      struct core_mmu_user_map *map)
{
	struct core_mmu_table_info dir_info;

	COMPILE_TIME_ASSERT(sizeof(uint64_t) * XLAT_TABLE_ENTRIES == PGT_SIZE);

	core_mmu_get_user_pgdir(&dir_info);
	memset(dir_info.table, 0, PGT_SIZE);
	core_mmu_populate_user_map(&dir_info, utc);
	map->user_map = virt_to_phys(dir_info.table) | TABLE_DESC;
	map->asid = utc->context & TTBR_ASID_MASK;
}
bool core_mmu_find_table(vaddr_t va, unsigned max_level,
			 struct core_mmu_table_info *tbl_info)
{
	uint64_t *tbl = l1_xlation_table[get_core_pos()];
	paddr_t ntbl;
	unsigned level = 1;
	vaddr_t va_base = 0;
	unsigned num_entries = NUM_L1_ENTRIES;

	while (true) {
		unsigned level_size_shift =
			L1_XLAT_ADDRESS_SHIFT - (level - 1) *
						XLAT_TABLE_ENTRIES_SHIFT;
		unsigned n = (va - va_base) >> level_size_shift;

		if (n >= num_entries)
			return false;

		if (level == max_level || level == 3 ||
		    (tbl[n] & TABLE_DESC) != TABLE_DESC) {
			/*
			 * We've either reached max_level, level 3, a block
			 * mapping entry or an "invalid" mapping entry.
			 */
			tbl_info->table = tbl;
			tbl_info->va_base = va_base;
			tbl_info->level = level;
			tbl_info->shift = level_size_shift;
			tbl_info->num_entries = num_entries;
			return true;
		}

		/* Copy bits 39:12 from tbl[n] to ntbl */
		ntbl = (tbl[n] & ((1ULL << 40) - 1)) & ~((1 << 12) - 1);

		tbl = phys_to_virt(ntbl, MEM_AREA_TEE_RAM);
		if (!tbl)
			return false;

		va_base += n << level_size_shift;
		level++;
		num_entries = XLAT_TABLE_ENTRIES;
	}
}
bool core_mmu_divide_block(struct core_mmu_table_info *tbl_info,
			   unsigned int idx)
{
	uint64_t *new_table;
	uint64_t *entry;
	uint64_t new_table_desc;
	size_t new_entry_size;
	paddr_t paddr;
	uint32_t attr;
	int i;

	if (tbl_info->level >= 3)
		return false;

	if (next_xlat >= MAX_XLAT_TABLES)
		return false;

	if (tbl_info->level == 1 && idx >= NUM_L1_ENTRIES)
		return false;

	if (tbl_info->level > 1 && idx >= XLAT_TABLE_ENTRIES)
		return false;

	entry = (uint64_t *)tbl_info->table + idx;
	assert((*entry & DESC_ENTRY_TYPE_MASK) == BLOCK_DESC);

	new_table = xlat_tables[next_xlat++];
	new_table_desc = TABLE_DESC | (uint64_t)(uintptr_t)new_table;

	/* store attributes of original block */
	attr = desc_to_mattr(tbl_info->level, *entry);
	paddr = *entry & OUTPUT_ADDRESS_MASK;
	new_entry_size = 1 << (tbl_info->shift - XLAT_TABLE_ENTRIES_SHIFT);

	/* Fill new xlat table with entries pointing to the same memory */
	for (i = 0; i < XLAT_TABLE_ENTRIES; i++) {
		*new_table = paddr | mattr_to_desc(tbl_info->level + 1, attr);
		paddr += new_entry_size;
		new_table++;
	}

	/* Update descriptor at current level */
	*entry = new_table_desc;

	return true;
}
void core_mmu_set_entry_primitive(void *table, size_t level, size_t idx,
				  paddr_t pa, uint32_t attr)
{
	uint64_t *tbl = table;
	uint64_t desc = mattr_to_desc(level, attr);

	tbl[idx] = desc | pa;
}
void core_mmu_get_entry_primitive(const void *table, size_t level,
				  size_t idx, paddr_t *pa, uint32_t *attr)
{
	const uint64_t *tbl = table;

	if (pa)
		*pa = (tbl[idx] & ((1ull << 40) - 1)) & ~((1 << 12) - 1);

	if (attr)
		*attr = desc_to_mattr(level, tbl[idx]);
}
bool core_mmu_user_va_range_is_defined(void)
{
	return user_va_idx != -1;
}
void core_mmu_get_user_va_range(vaddr_t *base, size_t *size)
{
	assert(user_va_idx != -1);

	if (base)
		*base = (vaddr_t)user_va_idx << L1_XLAT_ADDRESS_SHIFT;
	if (size)
		*size = 1 << L1_XLAT_ADDRESS_SHIFT;
}
bool core_mmu_user_mapping_is_active(void)
{
	assert(user_va_idx != -1);
	return !!l1_xlation_table[get_core_pos()][user_va_idx];
}
#ifdef ARM32
void core_mmu_get_user_map(struct core_mmu_user_map *map)
{
	assert(user_va_idx != -1);

	map->user_map = l1_xlation_table[get_core_pos()][user_va_idx];
	if (map->user_map) {
		map->asid = (read_ttbr0_64bit() >> TTBR_ASID_SHIFT) &
			    TTBR_ASID_MASK;
	} else {
		map->asid = 0;
	}
}
void core_mmu_set_user_map(struct core_mmu_user_map *map)
{
	uint64_t ttbr;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	assert(user_va_idx != -1);

	ttbr = read_ttbr0_64bit();
	/* Clear ASID */
	ttbr &= ~((uint64_t)TTBR_ASID_MASK << TTBR_ASID_SHIFT);
	write_ttbr0_64bit(ttbr);
	isb();

	/* Set the new map */
	if (map && map->user_map) {
		l1_xlation_table[get_core_pos()][user_va_idx] = map->user_map;
		dsb();	/* Make sure the write above is visible */
		ttbr |= ((uint64_t)map->asid << TTBR_ASID_SHIFT);
		write_ttbr0_64bit(ttbr);
		isb();
	} else {
		l1_xlation_table[get_core_pos()][user_va_idx] = 0;
		dsb();	/* Make sure the write above is visible */
	}

	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	thread_unmask_exceptions(exceptions);
}
enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr)
{
	assert(fault_descr & FSR_LPAE);

	switch (fault_descr & FSR_STATUS_MASK) {
	case 0x21: /* b100001 Alignment fault */
		return CORE_MMU_FAULT_ALIGNMENT;
	case 0x11: /* b010001 Asynchronous extern abort (DFSR only) */
		return CORE_MMU_FAULT_ASYNC_EXTERNAL;
	case 0x12: /* b010010 Debug event */
		return CORE_MMU_FAULT_DEBUG_EVENT;
	default:
		break;
	}

	switch ((fault_descr & FSR_STATUS_MASK) >> 2) {
	case 0x1: /* b0001LL Translation fault */
		return CORE_MMU_FAULT_TRANSLATION;
	case 0x2: /* b0010LL Access flag fault */
	case 0x3: /* b0011LL Permission fault */
		if (fault_descr & FSR_WNR)
			return CORE_MMU_FAULT_WRITE_PERMISSION;
		else
			return CORE_MMU_FAULT_READ_PERMISSION;
	default:
		return CORE_MMU_FAULT_OTHER;
	}
}
#endif /*ARM32*/
#ifdef ARM64
void core_mmu_get_user_map(struct core_mmu_user_map *map)
{
	assert(user_va_idx != -1);

	map->user_map = l1_xlation_table[get_core_pos()][user_va_idx];
	if (map->user_map) {
		map->asid = (read_ttbr0_el1() >> TTBR_ASID_SHIFT) &
			    TTBR_ASID_MASK;
	} else {
		map->asid = 0;
	}
}
void core_mmu_set_user_map(struct core_mmu_user_map *map)
{
	uint64_t ttbr;
	uint32_t daif = read_daif();

	write_daif(daif | DAIF_AIF);

	ttbr = read_ttbr0_el1();
	/* Clear ASID */
	ttbr &= ~((uint64_t)TTBR_ASID_MASK << TTBR_ASID_SHIFT);
	write_ttbr0_el1(ttbr);
	isb();

	/* Set the new map */
	if (map && map->user_map) {
		l1_xlation_table[get_core_pos()][user_va_idx] = map->user_map;
		dsb();	/* Make sure the write above is visible */
		ttbr |= ((uint64_t)map->asid << TTBR_ASID_SHIFT);
		write_ttbr0_el1(ttbr);
		isb();
	} else {
		l1_xlation_table[get_core_pos()][user_va_idx] = 0;
		dsb();	/* Make sure the write above is visible */
	}

	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	write_daif(daif);
}
enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr)
{
	switch ((fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_SP_ALIGN:
	case ESR_EC_PC_ALIGN:
		return CORE_MMU_FAULT_ALIGNMENT;
	case ESR_EC_IABT_EL0:
	case ESR_EC_DABT_EL0:
	case ESR_EC_IABT_EL1:
	case ESR_EC_DABT_EL1:
		switch (fault_descr & ESR_FSC_MASK) {
		case ESR_FSC_TRANS_L0:
		case ESR_FSC_TRANS_L1:
		case ESR_FSC_TRANS_L2:
		case ESR_FSC_TRANS_L3:
			return CORE_MMU_FAULT_TRANSLATION;
		case ESR_FSC_ACCF_L1:
		case ESR_FSC_ACCF_L2:
		case ESR_FSC_ACCF_L3:
		case ESR_FSC_PERMF_L1:
		case ESR_FSC_PERMF_L2:
		case ESR_FSC_PERMF_L3:
			if (fault_descr & ESR_ABT_WNR)
				return CORE_MMU_FAULT_WRITE_PERMISSION;
			else
				return CORE_MMU_FAULT_READ_PERMISSION;
		case ESR_FSC_ALIGN:
			return CORE_MMU_FAULT_ALIGNMENT;
		default:
			return CORE_MMU_FAULT_OTHER;
		}
	default:
		return CORE_MMU_FAULT_OTHER;
	}
}
#endif /*ARM64*/