/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <assert.h>
#include <keep.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <kernel/abort.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <mm/core_memprot.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <types_ext.h>
#include <tee_api_defines.h>
#include <tee/tee_cryp_provider.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>

#include "pager_private.h"

#define PAGER_AE_KEY_BITS 256
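
/*
 * Data that has been paged out is protected while it resides in the
 * backing store: read-only pages are verified against a SHA-256 hash when
 * they are paged back in, while read/write pages are encrypted and
 * authenticated with AES-GCM using the key below, generated by
 * generate_ae_key() during pager initialization.
 */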
struct pager_rw_pstate {
	uint8_t tag[PAGER_AES_GCM_TAG_LEN];

struct tee_pager_area {
	const uint8_t *hashes;
	struct pager_rw_pstate *rwp;
	TAILQ_ENTRY(tee_pager_area) link;

TAILQ_HEAD(tee_pager_area_head, tee_pager_area);

static struct tee_pager_area_head tee_pager_area_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);

#define INVALID_PGIDX UINT_MAX
/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @pgidx	index of the page's entry in the translation table of @area
 * @va_alias	Virtual address where the physical page always is aliased.
 *		Used during remapping of the page when the content needs to
 *		be updated before it's available at the new location.
 * @area	a pointer to the pager area
 */
struct tee_pager_pmem {
	struct tee_pager_area *area;
	TAILQ_ENTRY(tee_pager_pmem) link;
/* The list of physical pages. The first page in the list is the oldest */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);

static struct tee_pager_pmem_head tee_pager_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);

static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);

static uint8_t pager_ae_key[PAGER_AE_KEY_BITS / 8];

/* Number of pages hidden at a time */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)

/* Number of registered physical pages, used when hiding pages. */
static size_t tee_pager_npages;
#ifdef CFG_WITH_STATS
static struct tee_pager_stats pager_stats;

static inline void incr_ro_hits(void)
	pager_stats.ro_hits++;

static inline void incr_rw_hits(void)
	pager_stats.rw_hits++;

static inline void incr_hidden_hits(void)
	pager_stats.hidden_hits++;

static inline void incr_zi_released(void)
	pager_stats.zi_released++;

static inline void incr_npages_all(void)
	pager_stats.npages_all++;

static inline void set_npages(void)
	pager_stats.npages = tee_pager_npages;

void tee_pager_get_stats(struct tee_pager_stats *stats)
	*stats = pager_stats;

	pager_stats.hidden_hits = 0;
	pager_stats.ro_hits = 0;
	pager_stats.rw_hits = 0;
	pager_stats.zi_released = 0;

#else /* CFG_WITH_STATS */
static inline void incr_ro_hits(void) { }
static inline void incr_rw_hits(void) { }
static inline void incr_hidden_hits(void) { }
static inline void incr_zi_released(void) { }
static inline void incr_npages_all(void) { }
static inline void set_npages(void) { }

void tee_pager_get_stats(struct tee_pager_stats *stats)
	memset(stats, 0, sizeof(struct tee_pager_stats));
#endif /* CFG_WITH_STATS */
static struct pgt pager_core_pgt;
struct core_mmu_table_info tee_pager_tbl_info;
static struct core_mmu_table_info pager_alias_tbl_info;

static unsigned pager_spinlock = SPINLOCK_UNLOCK;

/* Defines the range of the alias area */
static tee_mm_entry_t *pager_alias_area;
/*
 * Physical pages are added in a stack-like fashion to the alias area,
 * @pager_alias_next_free gives the address of the next free entry if
 * @pager_alias_next_free is != 0.
 */
static uintptr_t pager_alias_next_free;
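
/*
 * The pager lock serializes updates to the area and pmem lists and to the
 * page tables they describe. All exceptions are masked while the lock is
 * held since the pager can be entered from the abort handler on any core.
 */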
static uint32_t pager_lock(void)
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	cpu_spin_lock(&pager_spinlock);

static void pager_unlock(uint32_t exceptions)
	cpu_spin_unlock(&pager_spinlock);
	thread_set_exceptions(exceptions);
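
/*
 * set_alias_area() - reserve the virtual range used to alias physical pages.
 * Verifies that the supplied range is page aligned and covered by a single
 * small-page translation table, records it as the alias area and clears any
 * existing mappings in it.
 */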
static void set_alias_area(tee_mm_entry_t *mm)
	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
	vaddr_t smem = tee_mm_get_smem(mm);
	size_t nbytes = tee_mm_get_bytes(mm);

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);

	if (pager_alias_area)
		panic("pager_alias_area already set");

	if (!ti->num_entries && !core_mmu_find_table(smem, UINT_MAX, ti))
		panic("Can't find translation table");

	if ((1 << ti->shift) != SMALL_PAGE_SIZE)
		panic("Unsupported page size in translation table");

	tbl_va_size = (1 << ti->shift) * ti->num_entries;
	if (!core_is_buffer_inside(smem, nbytes,
				   ti->va_base, tbl_va_size)) {
		EMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
		     smem, nbytes, ti->va_base, tbl_va_size);

	if (smem & SMALL_PAGE_MASK || nbytes & SMALL_PAGE_MASK)
		panic("invalid area alignment");

	pager_alias_area = mm;
	pager_alias_next_free = smem;

	/* Clear all mappings in the alias area */
	idx = core_mmu_va2idx(ti, smem);
	last_idx = core_mmu_va2idx(ti, smem + nbytes);
	for (; idx < last_idx; idx++)
		core_mmu_set_entry(ti, idx, 0, 0);

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
static void generate_ae_key(void)
	if (rng_generate(pager_ae_key, sizeof(pager_ae_key)) != TEE_SUCCESS)
		panic("failed to generate random");

void tee_pager_init(tee_mm_entry_t *mm_alias)
	set_alias_area(mm_alias);
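
/*
 * pager_add_alias_page() - map a physical page at the next free slot in the
 * alias area. The alias mapping is always writable so that page content can
 * be populated or saved regardless of how the page is mapped at its real
 * virtual address.
 */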
static void *pager_add_alias_page(paddr_t pa)
	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
			TEE_MATTR_SECURE | TEE_MATTR_PRW;

	DMSG("0x%" PRIxPA, pa);

	if (!pager_alias_next_free || !ti->num_entries)
		panic("invalid alias entry");

	idx = core_mmu_va2idx(ti, pager_alias_next_free);
	core_mmu_set_entry(ti, idx, pa, attr);
	pgt_inc_used_entries(&pager_core_pgt);
	pager_alias_next_free += SMALL_PAGE_SIZE;
	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
				      tee_mm_get_bytes(pager_alias_area)))
		pager_alias_next_free = 0;
	return (void *)core_mmu_idx2va(ti, idx);
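
/*
 * alloc_area() - allocate and initialize a struct tee_pager_area.
 * Writable areas get a backing store in secure DDR and one
 * struct pager_rw_pstate per page; read-only areas reference the supplied
 * backing store and SHA-256 hashes directly.
 */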
static struct tee_pager_area *alloc_area(struct pgt *pgt,
					 vaddr_t base, size_t size,
					 uint32_t flags, const void *store,
	struct tee_pager_area *area = calloc(1, sizeof(*area));
	tee_mm_entry_t *mm_store = NULL;

	if (flags & (TEE_MATTR_PW | TEE_MATTR_UW)) {
		if (flags & TEE_MATTR_LOCKED) {

		mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size);
		area->store = phys_to_virt(tee_mm_get_smem(mm_store),
		area->u.rwp = calloc(size / SMALL_PAGE_SIZE,
				     sizeof(struct pager_rw_pstate));
		area->store = (void *)store;
		area->u.hashes = hashes;
	tee_mm_free(mm_store);

static void area_insert_tail(struct tee_pager_area *area)
	uint32_t exceptions = pager_lock();

	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);

	pager_unlock(exceptions);
KEEP_PAGER(area_insert_tail);

static size_t tbl_usage_count(struct pgt *pgt)
	for (n = 0; n < tee_pager_tbl_info.num_entries; n++) {
		core_mmu_get_entry_primitive(pgt->tbl, tee_pager_tbl_info.level,
bool tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
			     const void *store, const void *hashes)
	struct tee_pager_area *area;
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;

	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
	     base, base + size, flags, store, hashes);

	if (base & SMALL_PAGE_MASK || size & SMALL_PAGE_MASK || !size) {
		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, size);

	if (!(flags & TEE_MATTR_PW) && (!store || !hashes))
		panic("non-write pages must provide store and hashes");

	if ((flags & TEE_MATTR_PW) && (store || hashes))
		panic("write pages cannot provide store or hashes");

	if (!pager_core_pgt.tbl) {
		pager_core_pgt.tbl = ti->table;
		pgt_set_used_entries(&pager_core_pgt,
				     tbl_usage_count(&pager_core_pgt));

	tbl_va_size = (1 << ti->shift) * ti->num_entries;
	if (!core_is_buffer_inside(base, size, ti->va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
		     base, size, ti->va_base, tbl_va_size);

	area = alloc_area(&pager_core_pgt, base, size, flags, store, hashes);

	area_insert_tail(area);
static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
	struct tee_pager_area *area;

	TAILQ_FOREACH(area, areas, link) {
		if (core_is_buffer_inside(va, 1, area->base, area->size))

#ifdef CFG_PAGED_USER_TA
static struct tee_pager_area *find_uta_area(vaddr_t va)
	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;

	if (!ctx || !is_user_ta_ctx(ctx))

	return find_area(to_user_ta_ctx(ctx)->areas, va);

static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
#endif /*CFG_PAGED_USER_TA*/
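
/*
 * get_area_mattr() - convert area flags into hardware mapping attributes.
 * Pages that aren't accessible from user mode are mapped as global.
 */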
static uint32_t get_area_mattr(uint32_t area_flags)
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));

	if (!(area_flags & (TEE_MATTR_UR | TEE_MATTR_UX | TEE_MATTR_UW)))
		attr |= TEE_MATTR_GLOBAL;

static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
	idx = core_mmu_va2idx(&pager_alias_tbl_info, (vaddr_t)pmem->va_alias);
	core_mmu_get_entry(&pager_alias_tbl_info, idx, &pa, NULL);
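
/*
 * The GCM IV is built from the virtual address of the struct
 * pager_rw_pstate and the 64-bit counter stored in it (see the
 * initializers below), so every saved version of every page is
 * encrypted with a unique IV.
 */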
static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
	struct pager_aes_gcm_iv iv = {
		{ (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }

	return pager_aes_gcm_decrypt(pager_ae_key, sizeof(pager_ae_key),
				     &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE);

static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
	struct pager_aes_gcm_iv iv;

	assert((rwp->iv + 1) > rwp->iv);
	/*
	 * IV is constructed as recommended in section "8.2.1 Deterministic
	 * Construction" of "Recommendation for Block Cipher Modes of
	 * Operation: Galois/Counter Mode (GCM) and GMAC",
	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
	 */
	iv.iv[0] = (vaddr_t)rwp;
	iv.iv[1] = rwp->iv >> 32;

	if (!pager_aes_gcm_encrypt(pager_ae_key, sizeof(pager_ae_key),
				   src, dst, SMALL_PAGE_SIZE))
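
/*
 * tee_pager_load_page() - populate the aliased page with the content that
 * belongs at @page_va in @area:
 * - read-only areas: copy from the backing store and verify the SHA-256 hash
 * - read/write areas: decrypt from the backing store, or zero-fill if the
 *   page has never been saved (iv == 0)
 * - remaining (locked) areas: zero-fill
 */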
static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
	size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
	const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;

	switch (area->type) {
		const void *hash = area->u.hashes +
				   idx * TEE_SHA256_HASH_SIZE;

		memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
		if (hash_sha256_check(hash, va_alias,
				      SMALL_PAGE_SIZE) != TEE_SUCCESS) {
			EMSG("PH 0x%" PRIxVA " failed", page_va);

		FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64,
		     va_alias, page_va, area->u.rwp[idx].iv);
		if (!area->u.rwp[idx].iv)
			memset(va_alias, 0, SMALL_PAGE_SIZE);
		else if (!decrypt_page(&area->u.rwp[idx], stored_page,
			EMSG("PH 0x%" PRIxVA " failed", page_va);

		FMSG("Zero init %p %#" PRIxVA, va_alias, page_va);
		memset(va_alias, 0, SMALL_PAGE_SIZE);
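
/*
 * tee_pager_save_page() - write a page back to its backing store. Only
 * read/write pages that may have been dirtied are encrypted and saved;
 * read-only and clean pages can always be restored from their store.
 */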
static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;

	if (pmem->area->type == AREA_TYPE_RW && (attr & dirty_bits)) {
		size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK;
		size_t idx = pmem->pgidx - (offs >> SMALL_PAGE_SHIFT);
		void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE;

		assert(pmem->area->flags & (TEE_MATTR_PW | TEE_MATTR_UW));
		encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias,
		FMSG("Saved %#" PRIxVA " iv %#" PRIx64,
		     pmem->area->base + idx * SMALL_PAGE_SIZE,
		     pmem->area->u.rwp[idx].iv);

static void area_get_entry(struct tee_pager_area *area, size_t idx,
			   paddr_t *pa, uint32_t *attr)
	assert(idx < tee_pager_tbl_info.num_entries);
	core_mmu_get_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,

static void area_set_entry(struct tee_pager_area *area, size_t idx,
			   paddr_t pa, uint32_t attr)
	assert(idx < tee_pager_tbl_info.num_entries);
	core_mmu_set_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,

static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;

static vaddr_t __maybe_unused area_idx2va(struct tee_pager_area *area,
	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);

#ifdef CFG_PAGED_USER_TA
static void free_area(struct tee_pager_area *area)
	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr,
				virt_to_phys(area->store)));
	if (area->type == AREA_TYPE_RW)

static bool pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
	struct tee_pager_area *area;
	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);

	utc->areas = malloc(sizeof(*utc->areas));
	TAILQ_INIT(utc->areas);

	flags = TEE_MATTR_PRW | TEE_MATTR_URWX;

	if (find_area(utc->areas, b))

	s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);

	/* Table info will be set when the context is activated. */
	area = alloc_area(NULL, b, s2, flags, NULL, NULL);

	TAILQ_INSERT_TAIL(utc->areas, area, link);
bool tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size)
	struct thread_specific_data *tsd = thread_get_tsd();
	struct tee_pager_area *area;
	struct core_mmu_table_info dir_info = { NULL };

	if (&utc->ctx != tsd->ctx) {
		/*
		 * Changes are to a utc that isn't active. Just add the
		 * areas; page tables will be dealt with later.
		 */
		return pager_add_uta_area(utc, base, size);

	/*
	 * Assign page tables before adding areas to be able to tell which
	 * are newly added and should be removed in case of failure.
	 */
	tee_pager_assign_uta_tables(utc);
	if (!pager_add_uta_area(utc, base, size)) {
		struct tee_pager_area *next_a;

		/* Remove all added areas */
		TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
			TAILQ_REMOVE(utc->areas, area, link);

	/*
	 * Assign page tables to the new areas and make sure that the page
	 * tables are registered in the upper table.
	 */
	tee_pager_assign_uta_tables(utc);
	core_mmu_get_user_pgdir(&dir_info);
	TAILQ_FOREACH(area, utc->areas, link) {
		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
		core_mmu_get_entry(&dir_info, idx, &pa, &attr);

		/*
		 * Check if the page table is already used; if it is, it's
		 * already registered.
		 */
		if (area->pgt->num_used_entries) {
			assert(attr & TEE_MATTR_TABLE);
			assert(pa == virt_to_phys(area->pgt->tbl));

		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
		pa = virt_to_phys(area->pgt->tbl);
		/*
		 * Note that the update of the table entry is guaranteed to
		 * be atomic.
		 */
		core_mmu_set_entry(&dir_info, idx, pa, attr);
static void init_tbl_info_from_pgt(struct core_mmu_table_info *ti,
	ti->table = pgt->tbl;
	ti->va_base = pgt->vabase;
	ti->level = tee_pager_tbl_info.level;
	ti->shift = tee_pager_tbl_info.shift;
	ti->num_entries = tee_pager_tbl_info.num_entries;
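
/*
 * transpose_area() - move an area, together with any physical pages
 * currently mapping it, from its old page table and base address to
 * @new_pgt and @new_base. Used when a paged region is transferred from one
 * user TA context to another.
 */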
static void transpose_area(struct tee_pager_area *area, struct pgt *new_pgt,
	uint32_t exceptions = pager_lock();

	/*
	 * If there's no pgt assigned to the old area there are no pages to
	 * deal with either, just update with a new pgt and base.
	 */
	struct core_mmu_table_info old_ti;
	struct core_mmu_table_info new_ti;
	struct tee_pager_pmem *pmem;

	init_tbl_info_from_pgt(&old_ti, area->pgt);
	init_tbl_info_from_pgt(&new_ti, new_pgt);

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (pmem->area != area)

		core_mmu_get_entry(&old_ti, pmem->pgidx, &pa, &attr);
		core_mmu_set_entry(&old_ti, pmem->pgidx, 0, 0);

		assert(pa == get_pmem_pa(pmem));

		assert(area->pgt->num_used_entries);
		area->pgt->num_used_entries--;

		va = core_mmu_idx2va(&old_ti, pmem->pgidx);
		va = va - area->base + new_base;
		pmem->pgidx = core_mmu_va2idx(&new_ti, va);
		core_mmu_set_entry(&new_ti, pmem->pgidx, pa, attr);
		new_pgt->num_used_entries++;

	area->base = new_base;
	pager_unlock(exceptions);
KEEP_PAGER(transpose_area);
void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
				   struct user_ta_ctx *dst_utc,
				   vaddr_t dst_base, struct pgt **dst_pgt,
	struct tee_pager_area *area;
	struct tee_pager_area *next_a;

	TAILQ_FOREACH_SAFE(area, src_utc->areas, link, next_a) {
		vaddr_t new_area_base;

		if (!core_is_buffer_inside(area->base, area->size,

		TAILQ_REMOVE(src_utc->areas, area, link);

		new_area_base = dst_base + (area->base - src_base);
		new_idx = (new_area_base - dst_pgt[0]->vabase) /
		assert((new_area_base & ~CORE_MMU_PGDIR_MASK) ==
		       dst_pgt[new_idx]->vabase);
		transpose_area(area, dst_pgt[new_idx], new_area_base);

		/*
		 * Assert that this will not cause any conflicts in the new
		 * utc. This should already be guaranteed, but a bug here
		 * could be tricky to find.
		 */
		assert(!find_area(dst_utc->areas, area->base));
		TAILQ_INSERT_TAIL(dst_utc->areas, area, link);
static void rem_area(struct tee_pager_area_head *area_head,
		     struct tee_pager_area *area)
	struct tee_pager_pmem *pmem;

	exceptions = pager_lock();

	TAILQ_REMOVE(area_head, area, link);

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (pmem->area == area) {
			area_set_entry(area, pmem->pgidx, 0, 0);
			pgt_dec_used_entries(area->pgt);
			pmem->pgidx = INVALID_PGIDX;

	pager_unlock(exceptions);
KEEP_PAGER(rem_area);

void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
	struct tee_pager_area *area;
	struct tee_pager_area *next_a;
	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);

	TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
		if (core_is_buffer_inside(area->base, area->size, base, s))
			rem_area(utc->areas, area);

void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
	struct tee_pager_area *area;

	area = TAILQ_FIRST(utc->areas);

	TAILQ_REMOVE(utc->areas, area, link);
bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
				 size_t size, uint32_t flags)
	struct tee_pager_area *area = find_area(utc->areas, b);
	struct tee_pager_pmem *pmem;

	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
	if (f & TEE_MATTR_UW)

	f = get_area_mattr(f);

	exceptions = pager_lock();

	s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
	if (!area || area->base != b || area->size != s2) {

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (pmem->area != area)

		area_get_entry(pmem->area, pmem->pgidx, &pa, &a);
		if (a & TEE_MATTR_VALID_BLOCK)
			assert(pa == get_pmem_pa(pmem));

			pa = get_pmem_pa(pmem);

		area_set_entry(pmem->area, pmem->pgidx, 0, 0);
		/* TODO only invalidate entries touched above */
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
		if (!(flags & TEE_MATTR_UW))
			tee_pager_save_page(pmem, a);

		area_set_entry(pmem->area, pmem->pgidx, pa, f);

		if (flags & TEE_MATTR_UX) {
			void *va = (void *)area_idx2va(pmem->area,

			cache_maintenance_l1(DCACHE_AREA_CLEAN, va,
			cache_maintenance_l1(ICACHE_AREA_INVALIDATE, va,

	area = TAILQ_NEXT(area, link);

	pager_unlock(exceptions);
KEEP_PAGER(tee_pager_set_uta_area_attr);
#endif /*CFG_PAGED_USER_TA*/
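
/*
 * tee_pager_unhide_page() - if the page at @page_va is present but hidden,
 * make it accessible again and move it to the tail of the pmem list so it
 * is considered recently used.
 */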
static bool tee_pager_unhide_page(vaddr_t page_va)
	struct tee_pager_pmem *pmem;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (pmem->pgidx == INVALID_PGIDX)

		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
			  (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))

		if (area_va2idx(pmem->area, page_va) == pmem->pgidx) {
			uint32_t a = get_area_mattr(pmem->area->flags);

			/* page is hidden, show and move to back */
			if (pa != get_pmem_pa(pmem))
				panic("unexpected pa");

			/*
			 * If it's not a dirty block, then it should be
			 * read-only.
			 */
			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);

			FMSG("Unhide %#" PRIxVA, page_va);

			if (page_va == 0x8000a000)
				FMSG("unhide %#" PRIxVA " a %#" PRIX32,
			area_set_entry(pmem->area, pmem->pgidx, pa, a);

			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);

			/* TODO only invalidate entry touched above */
			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
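
/*
 * tee_pager_hide_pages() - temporarily unmap up to TEE_PAGER_NHIDE of the
 * oldest mapped pages. The faults taken when hidden pages are touched
 * again are what keeps the pmem list roughly ordered by recent use.
 */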
static void tee_pager_hide_pages(void)
	struct tee_pager_pmem *pmem;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (n >= TEE_PAGER_NHIDE)

		/* we cannot hide pages when pmem->area is not defined. */

		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
		if (!(attr & TEE_MATTR_VALID_BLOCK))

		assert(pa == get_pmem_pa(pmem));
		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)) {
			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
			FMSG("Hide %#" PRIxVA,
			     area_idx2va(pmem->area, pmem->pgidx));
			a = TEE_MATTR_HIDDEN_BLOCK;
		area_set_entry(pmem->area, pmem->pgidx, pa, a);

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
/*
 * Find mapped pmem, unmap it and move it to the pageable pmem list.
 * Return false if page was not mapped, and true if page was mapped.
 */
static bool tee_pager_release_one_phys(struct tee_pager_area *area,
	struct tee_pager_pmem *pmem;

	pgidx = area_va2idx(area, page_va);
	area_get_entry(area, pgidx, &pa, &attr);

	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);

	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
		if (pmem->area != area || pmem->pgidx != pgidx)

		assert(pa == get_pmem_pa(pmem));
		area_set_entry(area, pgidx, 0, 0);
		pgt_dec_used_entries(area->pgt);
		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
		pmem->pgidx = INVALID_PGIDX;
		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
/* Finds the oldest page and unmaps it from its old virtual address */
static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
	struct tee_pager_pmem *pmem;

	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
		EMSG("No pmem entries");

	if (pmem->pgidx != INVALID_PGIDX) {

		assert(pmem->area && pmem->area->pgt);
		area_get_entry(pmem->area, pmem->pgidx, NULL, &a);
		area_set_entry(pmem->area, pmem->pgidx, 0, 0);
		pgt_dec_used_entries(pmem->area->pgt);
		/* TODO only invalidate entries touched above */
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
		tee_pager_save_page(pmem, a);

	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
	pmem->pgidx = INVALID_PGIDX;

	if (area->type == AREA_TYPE_LOCK) {
		/* Move page to lock list */
		if (tee_pager_npages <= 0)
			panic("running out of pages");

		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);

		/* move page to back */
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
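
/*
 * pager_update_permissions() - handle an abort on a page that is already
 * mapped, e.g. making a read-only page writable on the first write to a
 * writable area. Returns true when nothing more needs to be done with
 * the abort; *handled then tells whether the access was legal or whether
 * the faulting context should be aborted.
 */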
static bool pager_update_permissions(struct tee_pager_area *area,
				     struct abort_info *ai, bool *handled)
	unsigned int pgidx = area_va2idx(area, ai->va);

	area_get_entry(area, pgidx, &pa, &attr);

	if (!(attr & TEE_MATTR_VALID_BLOCK))

	/* Not readable, should not happen */
	if (abort_is_user_exception(ai)) {
		if (!(attr & TEE_MATTR_UR))

		if (!(attr & TEE_MATTR_PR)) {
			abort_print_error(ai);

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
			/* Check attempting to execute from an NOX page */
			if (abort_is_user_exception(ai)) {
				if (!(attr & TEE_MATTR_UX))

				if (!(attr & TEE_MATTR_PX)) {
					abort_print_error(ai);

		/* Since the page is mapped now it's OK */

	case CORE_MMU_FAULT_WRITE_PERMISSION:
		/* Check attempting to write to an RO page */
		if (abort_is_user_exception(ai)) {
			if (!(area->flags & TEE_MATTR_UW))

			if (!(attr & TEE_MATTR_UW)) {
				     (void *)(ai->va & ~SMALL_PAGE_MASK));
				area_set_entry(area, pgidx, pa,
					       get_area_mattr(area->flags));
				/* TODO only invalidate entry above */
				core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

			if (!(area->flags & TEE_MATTR_PW)) {
				abort_print_error(ai);

			if (!(attr & TEE_MATTR_PW)) {
				     (void *)(ai->va & ~SMALL_PAGE_MASK));
				area_set_entry(area, pgidx, pa,
					       get_area_mattr(area->flags));
				/* TODO only invalidate entry above */
				core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

		/* Since permissions have been updated now it's OK */

		/* Some fault we can't deal with */
		if (abort_is_user_exception(ai))

		abort_print_error(ai);
#ifdef CFG_TEE_CORE_DEBUG
static void stat_handle_fault(void)
	static size_t num_faults;
	static size_t min_npages = SIZE_MAX;
	static size_t total_min_npages = SIZE_MAX;

	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
		DMSG("nfaults %zu npages %zu (min %zu)",
		     num_faults, tee_pager_npages, min_npages);
		min_npages = tee_pager_npages; /* reset */

	if (tee_pager_npages < min_npages)
		min_npages = tee_pager_npages;
	if (tee_pager_npages < total_min_npages)
		total_min_npages = tee_pager_npages;

static void stat_handle_fault(void)
bool tee_pager_handle_fault(struct abort_info *ai)
	struct tee_pager_area *area;
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
	uint32_t exceptions;

#ifdef TEE_PAGER_DEBUG_PRINT

	/*
	 * We're updating pages that can affect several active CPUs at a
	 * time below. We end up here because a thread tries to access some
	 * memory that isn't available. We have to be careful when making
	 * that memory available as other threads may succeed in accessing
	 * that address the moment after we've made it available.
	 *
	 * That means that we can't just map the memory and populate the
	 * page; instead we use the aliased mapping to populate the page
	 * and once everything is ready we map it.
	 */
	exceptions = pager_lock();

	stat_handle_fault();

	/* check if the access is valid */
	if (abort_is_user_exception(ai)) {
		area = find_uta_area(ai->va);

		area = find_area(&tee_pager_area_head, ai->va);
			area = find_uta_area(ai->va);

	if (!area || !area->pgt) {

	if (!tee_pager_unhide_page(page_va)) {
		struct tee_pager_pmem *pmem = NULL;

		/*
		 * The page wasn't hidden, but some other core may have
		 * updated the table entry before we got here or we need
		 * to make a read-only page read-write (dirty).
		 */
		if (pager_update_permissions(area, ai, &ret)) {
			/*
			 * Nothing more to do with the abort. The problem
			 * could already have been dealt with from another
			 * core or, if ret is false, the TA will be panicked.
			 */

		pmem = tee_pager_get_page(area);

		/* load page code & data */
		tee_pager_load_page(area, page_va, pmem->va_alias);

		/*
		 * We've updated the page using the aliased mapping and
		 * some cache maintenance is now needed if it's an
		 * executable page.
		 *
		 * Since the d-cache is a Physically-indexed,
		 * physically-tagged (PIPT) cache we can clean the aliased
		 * address instead of the real virtual address.
		 *
		 * The i-cache can also be PIPT, but may be something else
		 * too; to keep it simple we invalidate the entire i-cache.
		 * As a future optimization we may invalidate only the
		 * aliased area if it's a PIPT cache, else the entire cache.
		 */
		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
			/*
			 * Doing these operations to LoUIS (Level of
			 * Unification, Inner Shareable) would be enough
			 */
			cache_maintenance_l1(DCACHE_AREA_CLEAN,
					     pmem->va_alias, SMALL_PAGE_SIZE);

			cache_maintenance_l1(ICACHE_INVALIDATE, NULL, 0);

		pmem->pgidx = area_va2idx(area, ai->va);
		attr = get_area_mattr(area->flags) &
		       ~(TEE_MATTR_PW | TEE_MATTR_UW);
		area_set_entry(area, pmem->pgidx, get_pmem_pa(pmem), attr);
		pgt_inc_used_entries(area->pgt);

		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
		     area_idx2va(area, pmem->pgidx), get_pmem_pa(pmem));

	tee_pager_hide_pages();

	pager_unlock(exceptions);
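
/*
 * tee_pager_add_pages() - register physical pages for use by the pager.
 * With @unmap set the pages are unmapped and become immediately available
 * for paging; otherwise they stay mapped and are assigned to the area
 * covering @vaddr.
 */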
void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);

	for (n = 0; n < npages; n++) {
		struct tee_pager_pmem *pmem;
		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
		unsigned pgidx = core_mmu_va2idx(ti, va);

		/*
		 * Note that we can only support adding pages in the
		 * valid range of this table info, currently not a problem.
		 */
		core_mmu_get_entry(ti, pgidx, &pa, &attr);

		/* Ignore unmapped pages/blocks */
		if (!(attr & TEE_MATTR_VALID_BLOCK))

		pmem = malloc(sizeof(struct tee_pager_pmem));
			panic("out of mem");

		pmem->va_alias = pager_add_alias_page(pa);

			pmem->pgidx = INVALID_PGIDX;
			core_mmu_set_entry(ti, pgidx, 0, 0);
			pgt_dec_used_entries(&pager_core_pgt);

			/*
			 * The page is still mapped, let's assign the area
			 * and update the protection bits accordingly.
			 */
			pmem->area = find_area(&tee_pager_area_head, va);
			assert(pmem->area->pgt == &pager_core_pgt);
			pmem->pgidx = pgidx;
			assert(pa == get_pmem_pa(pmem));
			area_set_entry(pmem->area, pgidx, pa,
				       get_area_mattr(pmem->area->flags));

		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);

	/* Invalidate secure TLB */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
#ifdef CFG_PAGED_USER_TA
static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
	struct pgt *p = pgt;

	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
		p = SLIST_NEXT(p, link);

void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
	struct tee_pager_area *area;
	struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);

	TAILQ_FOREACH(area, utc->areas, link) {
			area->pgt = find_pgt(pgt, area->base);

			assert(area->pgt == find_pgt(pgt, area->base));

static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)
	assert(pmem->area && pmem->area->pgt);

	area_get_entry(pmem->area, pmem->pgidx, NULL, &attr);
	area_set_entry(pmem->area, pmem->pgidx, 0, 0);
	tee_pager_save_page(pmem, attr);
	assert(pmem->area->pgt->num_used_entries);
	pmem->area->pgt->num_used_entries--;
	pmem->pgidx = INVALID_PGIDX;

void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
	struct tee_pager_pmem *pmem;
	struct tee_pager_area *area;
	uint32_t exceptions = pager_lock();

	if (!pgt->num_used_entries)

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (!pmem->area || pmem->pgidx == INVALID_PGIDX)

		if (pmem->area->pgt == pgt)
			pager_save_and_release_entry(pmem);

	assert(!pgt->num_used_entries);

	if (is_user_ta_ctx(pgt->ctx)) {
		TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
			if (area->pgt == pgt)

	pager_unlock(exceptions);
KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
#endif /*CFG_PAGED_USER_TA*/
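
/*
 * tee_pager_release_phys() - release the physical pages backing a locked
 * range so they can be reused for paging. Only whole small pages fully
 * inside [addr, addr + size) are released; the range must be covered by a
 * single pager area.
 */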
void tee_pager_release_phys(void *addr, size_t size)
	bool unmapped = false;
	vaddr_t va = (vaddr_t)addr;
	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
	struct tee_pager_area *area;
	uint32_t exceptions;

	area = find_area(&tee_pager_area_head, begin);
	    area != find_area(&tee_pager_area_head, end - SMALL_PAGE_SIZE))

	exceptions = pager_lock();

	for (va = begin; va < end; va += SMALL_PAGE_SIZE)
		unmapped |= tee_pager_release_one_phys(area, va);

	/* Invalidate secure TLB */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	pager_unlock(exceptions);
KEEP_PAGER(tee_pager_release_phys);
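
/*
 * tee_pager_alloc() - allocate a paged read/write buffer from the vcore VA
 * space. With TEE_MATTR_LOCKED the pages are pinned once faulted in and
 * are meant to be released with tee_pager_release_phys().
 *
 * A minimal usage sketch (illustrative only, not taken from this file):
 *
 *	void *buf = tee_pager_alloc(4 * SMALL_PAGE_SIZE, 0);
 *
 *	if (!buf)
 *		panic();
 */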
void *tee_pager_alloc(size_t size, uint32_t flags)
	uint32_t f = TEE_MATTR_PW | TEE_MATTR_PR | (flags & TEE_MATTR_LOCKED);

	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));

	tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),

	return (void *)tee_mm_get_smem(mm);