/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm.h>
#include <assert.h>
#include <keep.h>
#include <sys/queue.h>
#include <kernel/abort.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <mm/core_memprot.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <types_ext.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_defines.h>
#include <tee/tee_cryp_provider.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>

#include "pager_private.h"

#define PAGER_AE_KEY_BITS       256

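/*
 * struct pager_rw_pstate - per-page state of a paged read/write page
 * @iv          counter used to construct the AES-GCM IV, 0 means the
 *              page has never been stored and reads back as zeros
 * @tag         AES-GCM authentication tag of the encrypted page in the
 *              backing store
 */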
struct pager_rw_pstate {
        uint64_t iv;
        uint8_t tag[PAGER_AES_GCM_TAG_LEN];
};

enum area_type {
        AREA_TYPE_RO,
        AREA_TYPE_RW,
        AREA_TYPE_LOCK,
};

struct tee_pager_area {
        union {
                const uint8_t *hashes;
                struct pager_rw_pstate *rwp;
        } u;
        uint8_t *store;
        enum area_type type;
        uint32_t flags;
        vaddr_t base;
        size_t size;
        struct pgt *pgt;
        TAILQ_ENTRY(tee_pager_area) link;
};

TAILQ_HEAD(tee_pager_area_head, tee_pager_area);

static struct tee_pager_area_head tee_pager_area_head =
        TAILQ_HEAD_INITIALIZER(tee_pager_area_head);

#define INVALID_PGIDX   UINT_MAX

/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @pgidx       index of the page table entry in the area's page table
 *              (area->pgt).
 * @va_alias    Virtual address where the physical page is always aliased.
 *              Used during remapping of the page when the content needs to
 *              be updated before it's available at the new location.
 * @area        a pointer to the pager area this page currently belongs to
 */
struct tee_pager_pmem {
        unsigned pgidx;
        void *va_alias;
        struct tee_pager_area *area;
        TAILQ_ENTRY(tee_pager_pmem) link;
};

/* The list of physical pages. The first page in the list is the oldest */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);

static struct tee_pager_pmem_head tee_pager_pmem_head =
        TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);

static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
        TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);

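/*
 * Authenticated-encryption key protecting the backing store of
 * read/write pages, generated from the RNG in generate_ae_key() at
 * pager initialization.
 */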
static uint8_t pager_ae_key[PAGER_AE_KEY_BITS / 8];

/* Number of pages to hide at a time, a third of the pageable pages */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)

/* Number of registered physical pages, used when hiding pages. */
static size_t tee_pager_npages;

#ifdef CFG_WITH_STATS
static struct tee_pager_stats pager_stats;

static inline void incr_ro_hits(void)
{
        pager_stats.ro_hits++;
}

static inline void incr_rw_hits(void)
{
        pager_stats.rw_hits++;
}

static inline void incr_hidden_hits(void)
{
        pager_stats.hidden_hits++;
}

static inline void incr_zi_released(void)
{
        pager_stats.zi_released++;
}

static inline void incr_npages_all(void)
{
        pager_stats.npages_all++;
}

static inline void set_npages(void)
{
        pager_stats.npages = tee_pager_npages;
}

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
        *stats = pager_stats;

        pager_stats.hidden_hits = 0;
        pager_stats.ro_hits = 0;
        pager_stats.rw_hits = 0;
        pager_stats.zi_released = 0;
}

#else /* CFG_WITH_STATS */
static inline void incr_ro_hits(void) { }
static inline void incr_rw_hits(void) { }
static inline void incr_hidden_hits(void) { }
static inline void incr_zi_released(void) { }
static inline void incr_npages_all(void) { }
static inline void set_npages(void) { }

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
        memset(stats, 0, sizeof(struct tee_pager_stats));
}
#endif /* CFG_WITH_STATS */

static struct pgt pager_core_pgt;
struct core_mmu_table_info tee_pager_tbl_info;
static struct core_mmu_table_info pager_alias_tbl_info;

static unsigned pager_spinlock = SPINLOCK_UNLOCK;

/* Defines the range of the alias area */
static tee_mm_entry_t *pager_alias_area;
/*
 * Physical pages are added in a stack-like fashion to the alias area.
 * @pager_alias_next_free gives the address of the next free entry, or 0
 * when the alias area is full.
 */
static uintptr_t pager_alias_next_free;

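/*
 * All exceptions are masked while the pager spinlock is held; an abort
 * or foreign interrupt taken on the same core with the lock held could
 * otherwise re-enter the pager and deadlock.
 */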
static uint32_t pager_lock(void)
{
        uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

        cpu_spin_lock(&pager_spinlock);
        return exceptions;
}

static void pager_unlock(uint32_t exceptions)
{
        cpu_spin_unlock(&pager_spinlock);
        thread_set_exceptions(exceptions);
}

static void set_alias_area(tee_mm_entry_t *mm)
{
        struct core_mmu_table_info *ti = &pager_alias_tbl_info;
        size_t tbl_va_size;
        unsigned idx;
        unsigned last_idx;
        vaddr_t smem = tee_mm_get_smem(mm);
        size_t nbytes = tee_mm_get_bytes(mm);

        DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);

        if (pager_alias_area)
                panic("pager_alias_area already set");

        if (!ti->num_entries && !core_mmu_find_table(smem, UINT_MAX, ti))
                panic("Can't find translation table");

        if ((1 << ti->shift) != SMALL_PAGE_SIZE)
                panic("Unsupported page size in translation table");

        tbl_va_size = (1 << ti->shift) * ti->num_entries;
        if (!core_is_buffer_inside(smem, nbytes,
                                   ti->va_base, tbl_va_size)) {
                EMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
                     smem, nbytes, ti->va_base, tbl_va_size);
                panic();
        }

        if (smem & SMALL_PAGE_MASK || nbytes & SMALL_PAGE_MASK)
                panic("invalid area alignment");

        pager_alias_area = mm;
        pager_alias_next_free = smem;

        /* Clear all mappings in the alias area */
        idx = core_mmu_va2idx(ti, smem);
        last_idx = core_mmu_va2idx(ti, smem + nbytes);
        for (; idx < last_idx; idx++)
                core_mmu_set_entry(ti, idx, 0, 0);

        /* TODO only invalidate entries touched above */
        core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

static void generate_ae_key(void)
{
        if (rng_generate(pager_ae_key, sizeof(pager_ae_key)) != TEE_SUCCESS)
                panic("failed to generate random");
}

void tee_pager_init(tee_mm_entry_t *mm_alias)
{
        set_alias_area(mm_alias);
        generate_ae_key();
}

static void *pager_add_alias_page(paddr_t pa)
{
        unsigned idx;
        struct core_mmu_table_info *ti = &pager_alias_tbl_info;
        uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
                        (TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
                        TEE_MATTR_SECURE | TEE_MATTR_PRW;

        DMSG("0x%" PRIxPA, pa);

        if (!pager_alias_next_free || !ti->num_entries)
                panic("invalid alias entry");

        idx = core_mmu_va2idx(ti, pager_alias_next_free);
        core_mmu_set_entry(ti, idx, pa, attr);
        pgt_inc_used_entries(&pager_core_pgt);
        pager_alias_next_free += SMALL_PAGE_SIZE;
        if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
                                      tee_mm_get_bytes(pager_alias_area)))
                pager_alias_next_free = 0;
        return (void *)core_mmu_idx2va(ti, idx);
}

static struct tee_pager_area *alloc_area(struct pgt *pgt,
                                         vaddr_t base, size_t size,
                                         uint32_t flags, const void *store,
                                         const void *hashes)
{
        struct tee_pager_area *area = calloc(1, sizeof(*area));
        enum area_type at;
        tee_mm_entry_t *mm_store = NULL;

        if (!area)
                return NULL;

        if (flags & (TEE_MATTR_PW | TEE_MATTR_UW)) {
                if (flags & TEE_MATTR_LOCKED) {
                        at = AREA_TYPE_LOCK;
                        goto out;
                }
                mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size);
                if (!mm_store)
                        goto bad;
                area->store = phys_to_virt(tee_mm_get_smem(mm_store),
                                           MEM_AREA_TA_RAM);
                if (!area->store)
                        goto bad;
                area->u.rwp = calloc(size / SMALL_PAGE_SIZE,
                                     sizeof(struct pager_rw_pstate));
                if (!area->u.rwp)
                        goto bad;
                at = AREA_TYPE_RW;
        } else {
                area->store = (void *)store;
                area->u.hashes = hashes;
                at = AREA_TYPE_RO;
        }
out:
        area->pgt = pgt;
        area->base = base;
        area->size = size;
        area->flags = flags;
        area->type = at;
        return area;
bad:
        tee_mm_free(mm_store);
        free(area->u.rwp);
        free(area);
        return NULL;
}

static void area_insert_tail(struct tee_pager_area *area)
{
        uint32_t exceptions = pager_lock();

        TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);

        pager_unlock(exceptions);
}
KEEP_PAGER(area_insert_tail);

static size_t tbl_usage_count(struct pgt *pgt)
{
        size_t n;
        paddr_t pa;
        size_t usage = 0;

        for (n = 0; n < tee_pager_tbl_info.num_entries; n++) {
                core_mmu_get_entry_primitive(pgt->tbl, tee_pager_tbl_info.level,
                                             n, &pa, NULL);
                if (pa)
                        usage++;
        }
        return usage;
}

bool tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
                        const void *store, const void *hashes)
{
        struct tee_pager_area *area;
        size_t tbl_va_size;
        struct core_mmu_table_info *ti = &tee_pager_tbl_info;

        DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
                base, base + size, flags, store, hashes);

        if (base & SMALL_PAGE_MASK || size & SMALL_PAGE_MASK || !size) {
                EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, size);
                panic();
        }

        if (!(flags & TEE_MATTR_PW) && (!store || !hashes))
                panic("non-write pages must provide store and hashes");

        if ((flags & TEE_MATTR_PW) && (store || hashes))
                panic("write pages cannot provide store or hashes");

        if (!pager_core_pgt.tbl) {
                pager_core_pgt.tbl = ti->table;
                pgt_set_used_entries(&pager_core_pgt,
                                     tbl_usage_count(&pager_core_pgt));
        }

        tbl_va_size = (1 << ti->shift) * ti->num_entries;
        if (!core_is_buffer_inside(base, size, ti->va_base, tbl_va_size)) {
                DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
                        base, size, ti->va_base, tbl_va_size);
                return false;
        }

        area = alloc_area(&pager_core_pgt, base, size, flags, store, hashes);
        if (!area)
                return false;

        area_insert_tail(area);
        return true;
}

static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
                                        vaddr_t va)
{
        struct tee_pager_area *area;

        if (!areas)
                return NULL;

        TAILQ_FOREACH(area, areas, link) {
                if (core_is_buffer_inside(va, 1, area->base, area->size))
                        return area;
        }
        return NULL;
}

#ifdef CFG_PAGED_USER_TA
static struct tee_pager_area *find_uta_area(vaddr_t va)
{
        struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;

        if (!ctx || !is_user_ta_ctx(ctx))
                return NULL;
        return find_area(to_user_ta_ctx(ctx)->areas, va);
}
#else
static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
{
        return NULL;
}
#endif /*CFG_PAGED_USER_TA*/

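/*
 * Translates area flags into the MMU attributes used for the actual
 * mapping: always a valid, secure, cached block. Mappings without user
 * access bits are marked global as they are identical in all address
 * spaces.
 */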
static uint32_t get_area_mattr(uint32_t area_flags)
{
        uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
                        TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
                        (area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));

        if (!(area_flags & (TEE_MATTR_UR | TEE_MATTR_UX | TEE_MATTR_UW)))
                attr |= TEE_MATTR_GLOBAL;

        return attr;
}

static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
{
        paddr_t pa;
        unsigned idx;

        idx = core_mmu_va2idx(&pager_alias_tbl_info, (vaddr_t)pmem->va_alias);
        core_mmu_get_entry(&pager_alias_tbl_info, idx, &pa, NULL);
        return pa;
}

static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
                        void *dst)
{
        struct pager_aes_gcm_iv iv = {
                { (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }
        };

        return pager_aes_gcm_decrypt(pager_ae_key, sizeof(pager_ae_key),
                                     &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE);
}

static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
{
        struct pager_aes_gcm_iv iv;

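        /*
         * The IV counter must never wrap; reusing an IV with the same
         * key would void the confidentiality and integrity guarantees
         * of AES-GCM.
         */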
        assert((rwp->iv + 1) > rwp->iv);
        rwp->iv++;
        /*
         * IV is constructed as recommended in section "8.2.1 Deterministic
         * Construction" of "Recommendation for Block Cipher Modes of
         * Operation: Galois/Counter Mode (GCM) and GMAC",
         * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
         */
        iv.iv[0] = (vaddr_t)rwp;
        iv.iv[1] = rwp->iv >> 32;
        iv.iv[2] = rwp->iv;

        if (!pager_aes_gcm_encrypt(pager_ae_key, sizeof(pager_ae_key),
                                   &iv, rwp->tag,
                                   src, dst, SMALL_PAGE_SIZE))
                panic("gcm failed");
}

static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
                        void *va_alias)
{
        size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
        const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;

        switch (area->type) {
        case AREA_TYPE_RO:
                {
                        const void *hash = area->u.hashes +
                                           idx * TEE_SHA256_HASH_SIZE;

                        memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
                        incr_ro_hits();

                        if (hash_sha256_check(hash, va_alias,
                                              SMALL_PAGE_SIZE) != TEE_SUCCESS) {
                                EMSG("PH 0x%" PRIxVA " failed", page_va);
                                panic();
                        }
                }
                break;
        case AREA_TYPE_RW:
                FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64,
                        va_alias, page_va, area->u.rwp[idx].iv);
                if (!area->u.rwp[idx].iv)
                        memset(va_alias, 0, SMALL_PAGE_SIZE);
                else if (!decrypt_page(&area->u.rwp[idx], stored_page,
                                       va_alias)) {
                        EMSG("PH 0x%" PRIxVA " failed", page_va);
                        panic();
                }
                incr_rw_hits();
                break;
        case AREA_TYPE_LOCK:
                FMSG("Zero init %p %#" PRIxVA, va_alias, page_va);
                memset(va_alias, 0, SMALL_PAGE_SIZE);
                break;
        default:
                panic();
        }
}

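/*
 * Writes a page back to its backing store when needed. Only read/write
 * pages that may have been modified (writable, or hidden while dirty)
 * are re-encrypted; read-only and clean pages can always be restored
 * from their store so they are simply dropped.
 */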
static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
{
        const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
                                    TEE_MATTR_HIDDEN_DIRTY_BLOCK;

        if (pmem->area->type == AREA_TYPE_RW && (attr & dirty_bits)) {
                size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK;
                size_t idx = pmem->pgidx - (offs >> SMALL_PAGE_SHIFT);
                void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE;

                assert(pmem->area->flags & (TEE_MATTR_PW | TEE_MATTR_UW));
                encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias,
                             stored_page);
                FMSG("Saved %#" PRIxVA " iv %#" PRIx64,
                        pmem->area->base + idx * SMALL_PAGE_SIZE,
                        pmem->area->u.rwp[idx].iv);
        }
}

static void area_get_entry(struct tee_pager_area *area, size_t idx,
                           paddr_t *pa, uint32_t *attr)
{
        assert(area->pgt);
        assert(idx < tee_pager_tbl_info.num_entries);
        core_mmu_get_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,
                                     idx, pa, attr);
}

static void area_set_entry(struct tee_pager_area *area, size_t idx,
                           paddr_t pa, uint32_t attr)
{
        assert(area->pgt);
        assert(idx < tee_pager_tbl_info.num_entries);
        core_mmu_set_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,
                                     idx, pa, attr);
}

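/*
 * Page indices used with area_get_entry()/area_set_entry() are relative
 * to the page directory entry covering the area, not to area->base,
 * hence the masking with CORE_MMU_PGDIR_MASK below.
 */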
static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
{
        return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
}

static vaddr_t __maybe_unused area_idx2va(struct tee_pager_area *area,
                                         size_t idx)
{
        return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
}

#ifdef CFG_PAGED_USER_TA
static void free_area(struct tee_pager_area *area)
{
        tee_mm_free(tee_mm_find(&tee_mm_sec_ddr,
                                virt_to_phys(area->store)));
        if (area->type == AREA_TYPE_RW)
                free(area->u.rwp);
        free(area);
}

static bool pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
                               size_t size)
{
        struct tee_pager_area *area;
        uint32_t flags;
        vaddr_t b = base;
        size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);

        if (!utc->areas) {
                utc->areas = malloc(sizeof(*utc->areas));
                if (!utc->areas)
                        return false;
                TAILQ_INIT(utc->areas);
        }

        flags = TEE_MATTR_PRW | TEE_MATTR_URWX;

        while (s) {
                size_t s2;

                if (find_area(utc->areas, b))
                        return false;

                s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);

                /* Table info will be set when the context is activated. */
                area = alloc_area(NULL, b, s2, flags, NULL, NULL);
                if (!area)
                        return false;
                TAILQ_INSERT_TAIL(utc->areas, area, link);
                b += s2;
                s -= s2;
        }

        return true;
}

bool tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size)
{
        struct thread_specific_data *tsd = thread_get_tsd();
        struct tee_pager_area *area;
        struct core_mmu_table_info dir_info = { NULL };

        if (&utc->ctx != tsd->ctx) {
                /*
                 * Changes are to a utc that isn't active. Just add the
                 * areas; page tables will be dealt with later.
                 */
                return pager_add_uta_area(utc, base, size);
        }

        /*
         * Assign page tables before adding areas to be able to tell which
         * are newly added and should be removed in case of failure.
         */
        tee_pager_assign_uta_tables(utc);
        if (!pager_add_uta_area(utc, base, size)) {
                struct tee_pager_area *next_a;

                /* Remove all added areas */
                TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
                        if (!area->pgt) {
                                TAILQ_REMOVE(utc->areas, area, link);
                                free_area(area);
                        }
                }
                return false;
        }

        /*
         * Assign page tables to the new areas and make sure that the page
         * tables are registered in the upper table.
         */
        tee_pager_assign_uta_tables(utc);
        core_mmu_get_user_pgdir(&dir_info);
        TAILQ_FOREACH(area, utc->areas, link) {
                paddr_t pa;
                size_t idx;
                uint32_t attr;

                idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
                core_mmu_get_entry(&dir_info, idx, &pa, &attr);

                /*
                 * Check if the page table is already in use; if so, it's
                 * already registered.
                 */
                if (area->pgt->num_used_entries) {
                        assert(attr & TEE_MATTR_TABLE);
                        assert(pa == virt_to_phys(area->pgt->tbl));
                        continue;
                }

                attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
                pa = virt_to_phys(area->pgt->tbl);
                assert(pa);
                /*
                 * Note that the update of the table entry is guaranteed to
                 * be atomic.
                 */
                core_mmu_set_entry(&dir_info, idx, pa, attr);
        }

        return true;
}

static void init_tbl_info_from_pgt(struct core_mmu_table_info *ti,
                                   struct pgt *pgt)
{
        assert(pgt);
        ti->table = pgt->tbl;
        ti->va_base = pgt->vabase;
        ti->level = tee_pager_tbl_info.level;
        ti->shift = tee_pager_tbl_info.shift;
        ti->num_entries = tee_pager_tbl_info.num_entries;
}

static void transpose_area(struct tee_pager_area *area, struct pgt *new_pgt,
                           vaddr_t new_base)
{
        uint32_t exceptions = pager_lock();

        /*
         * If there's no pgt assigned to the old area there are no pages
         * to deal with either, just update with a new pgt and base.
         */
        if (area->pgt) {
                struct core_mmu_table_info old_ti;
                struct core_mmu_table_info new_ti;
                struct tee_pager_pmem *pmem;

                init_tbl_info_from_pgt(&old_ti, area->pgt);
                init_tbl_info_from_pgt(&new_ti, new_pgt);

                TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
                        vaddr_t va;
                        paddr_t pa;
                        uint32_t attr;

                        if (pmem->area != area)
                                continue;
                        core_mmu_get_entry(&old_ti, pmem->pgidx, &pa, &attr);
                        core_mmu_set_entry(&old_ti, pmem->pgidx, 0, 0);

                        assert(pa == get_pmem_pa(pmem));
                        assert(attr);
                        assert(area->pgt->num_used_entries);
                        area->pgt->num_used_entries--;

                        va = core_mmu_idx2va(&old_ti, pmem->pgidx);
                        va = va - area->base + new_base;
                        pmem->pgidx = core_mmu_va2idx(&new_ti, va);
                        core_mmu_set_entry(&new_ti, pmem->pgidx, pa, attr);
                        new_pgt->num_used_entries++;
                }
        }

        area->pgt = new_pgt;
        area->base = new_base;
        pager_unlock(exceptions);
}
KEEP_PAGER(transpose_area);

void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
                                   vaddr_t src_base,
                                   struct user_ta_ctx *dst_utc,
                                   vaddr_t dst_base, struct pgt **dst_pgt,
                                   size_t size)
{
        struct tee_pager_area *area;
        struct tee_pager_area *next_a;

        TAILQ_FOREACH_SAFE(area, src_utc->areas, link, next_a) {
                vaddr_t new_area_base;
                size_t new_idx;

                if (!core_is_buffer_inside(area->base, area->size,
                                          src_base, size))
                        continue;

                TAILQ_REMOVE(src_utc->areas, area, link);

                new_area_base = dst_base + (area->base - src_base);
                new_idx = (new_area_base - dst_pgt[0]->vabase) /
                          CORE_MMU_PGDIR_SIZE;
                assert((new_area_base & ~CORE_MMU_PGDIR_MASK) ==
                       dst_pgt[new_idx]->vabase);
                transpose_area(area, dst_pgt[new_idx], new_area_base);

                /*
                 * Assert that this will not cause any conflicts in the new
                 * utc.  This should already be guaranteed, but a bug here
                 * could be tricky to find.
                 */
                assert(!find_area(dst_utc->areas, area->base));
                TAILQ_INSERT_TAIL(dst_utc->areas, area, link);
        }
}

static void rem_area(struct tee_pager_area_head *area_head,
                     struct tee_pager_area *area)
{
        struct tee_pager_pmem *pmem;
        uint32_t exceptions;

        exceptions = pager_lock();

        TAILQ_REMOVE(area_head, area, link);

        TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
                if (pmem->area == area) {
                        area_set_entry(area, pmem->pgidx, 0, 0);
                        pgt_dec_used_entries(area->pgt);
                        pmem->area = NULL;
                        pmem->pgidx = INVALID_PGIDX;
                }
        }

        pager_unlock(exceptions);
        free_area(area);
}
KEEP_PAGER(rem_area);

void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
                              size_t size)
{
        struct tee_pager_area *area;
        struct tee_pager_area *next_a;
        size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);

        TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
                if (core_is_buffer_inside(area->base, area->size, base, s))
                        rem_area(utc->areas, area);
        }
}

void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
{
        struct tee_pager_area *area;

        if (!utc->areas)
                return;

        while (true) {
                area = TAILQ_FIRST(utc->areas);
                if (!area)
                        break;
                TAILQ_REMOVE(utc->areas, area, link);
                free_area(area);
        }

        free(utc->areas);
}

bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
                                 size_t size, uint32_t flags)
{
        bool ret;
        vaddr_t b = base;
        size_t s = size;
        size_t s2;
        struct tee_pager_area *area = find_area(utc->areas, b);
        uint32_t exceptions;
        struct tee_pager_pmem *pmem;
        paddr_t pa;
        uint32_t a;
        uint32_t f;

        f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
        if (f & TEE_MATTR_UW)
                f |= TEE_MATTR_PW;
        f = get_area_mattr(f);

        exceptions = pager_lock();

        while (s) {
                s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
                if (!area || area->base != b || area->size != s2) {
                        ret = false;
                        goto out;
                }
                b += s2;
                s -= s2;

                TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
                        if (pmem->area != area)
                                continue;
                        area_get_entry(pmem->area, pmem->pgidx, &pa, &a);
                        if (a & TEE_MATTR_VALID_BLOCK)
                                assert(pa == get_pmem_pa(pmem));
                        else
                                pa = get_pmem_pa(pmem);
                        if (a == f)
                                continue;
                        area_set_entry(pmem->area, pmem->pgidx, 0, 0);
                        /* TODO only invalidate entries touched above */
                        core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
                        if (!(flags & TEE_MATTR_UW))
                                tee_pager_save_page(pmem, a);

                        area_set_entry(pmem->area, pmem->pgidx, pa, f);

                        if (flags & TEE_MATTR_UX) {
                                void *va = (void *)area_idx2va(pmem->area,
                                                               pmem->pgidx);

                                cache_op_inner(DCACHE_AREA_CLEAN, va,
                                                SMALL_PAGE_SIZE);
                                cache_op_inner(ICACHE_AREA_INVALIDATE, va,
                                                SMALL_PAGE_SIZE);
                        }
                }

                area->flags = f;
                area = TAILQ_NEXT(area, link);
        }

        ret = true;
out:
        pager_unlock(exceptions);
        return ret;
}
KEEP_PAGER(tee_pager_set_uta_area_attr);
#endif /*CFG_PAGED_USER_TA*/

static bool tee_pager_unhide_page(vaddr_t page_va)
{
        struct tee_pager_pmem *pmem;

        TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
                paddr_t pa;
                uint32_t attr;

                if (pmem->pgidx == INVALID_PGIDX)
                        continue;

                area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);

                if (!(attr &
                     (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
                        continue;

                if (area_va2idx(pmem->area, page_va) == pmem->pgidx) {
                        uint32_t a = get_area_mattr(pmem->area->flags);

                        /* page is hidden, show and move to back */
                        if (pa != get_pmem_pa(pmem))
                                panic("unexpected pa");

                        /*
                         * If it's not a dirty block, then it should be
                         * read only.
                         */
                        if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
                                a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
                        else
                                FMSG("Unhide %#" PRIxVA, page_va);

                        if (page_va == 0x8000a000)
                                FMSG("unhide %#" PRIxVA " a %#" PRIX32,
                                        page_va, a);
                        area_set_entry(pmem->area, pmem->pgidx, pa, a);

                        TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
                        TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);

                        /* TODO only invalidate entry touched above */
                        core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

                        incr_hidden_hits();
                        return true;
                }
        }

        return false;
}

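/*
 * Make up to a third of the pageable pages inaccessible ("hidden")
 * while keeping them resident. A later access faults and is resolved
 * cheaply by tee_pager_unhide_page(), which also moves the page to the
 * back of the list; untouched pages drift to the front and become
 * eviction candidates, roughly approximating an LRU policy.
 */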
static void tee_pager_hide_pages(void)
{
        struct tee_pager_pmem *pmem;
        size_t n = 0;

        TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
                paddr_t pa;
                uint32_t attr;
                uint32_t a;

                if (n >= TEE_PAGER_NHIDE)
                        break;
                n++;

                /* we cannot hide pages when pmem->area is not defined. */
                if (!pmem->area)
                        continue;

                area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
                if (!(attr & TEE_MATTR_VALID_BLOCK))
                        continue;

                assert(pa == get_pmem_pa(pmem));
                if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)) {
                        a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
                        FMSG("Hide %#" PRIxVA,
                             area_idx2va(pmem->area, pmem->pgidx));
                } else
                        a = TEE_MATTR_HIDDEN_BLOCK;
                area_set_entry(pmem->area, pmem->pgidx, pa, a);
        }

        /* TODO only invalidate entries touched above */
        core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

/*
 * Finds a mapped pmem for the given lock-area page, unmaps it and moves
 * it back to the list of pageable pmems.
 * Returns false if the page was not mapped, true if it was.
 */
static bool tee_pager_release_one_phys(struct tee_pager_area *area,
                                       vaddr_t page_va)
{
        struct tee_pager_pmem *pmem;
        unsigned pgidx;
        paddr_t pa;
        uint32_t attr;

        pgidx = area_va2idx(area, page_va);
        area_get_entry(area, pgidx, &pa, &attr);

        FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);

        TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
                if (pmem->area != area || pmem->pgidx != pgidx)
                        continue;

                assert(pa == get_pmem_pa(pmem));
                area_set_entry(area, pgidx, 0, 0);
                pgt_dec_used_entries(area->pgt);
                TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
                pmem->area = NULL;
                pmem->pgidx = INVALID_PGIDX;
                tee_pager_npages++;
                set_npages();
                TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
                incr_zi_released();
                return true;
        }

        return false;
}

/* Finds the oldest page and unmaps it from its old virtual address */
static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
{
        struct tee_pager_pmem *pmem;

        pmem = TAILQ_FIRST(&tee_pager_pmem_head);
        if (!pmem) {
                EMSG("No pmem entries");
                return NULL;
        }
        if (pmem->pgidx != INVALID_PGIDX) {
                uint32_t a;

                assert(pmem->area && pmem->area->pgt);
                area_get_entry(pmem->area, pmem->pgidx, NULL, &a);
                area_set_entry(pmem->area, pmem->pgidx, 0, 0);
                pgt_dec_used_entries(pmem->area->pgt);
                /* TODO only invalidate entries touched above */
                core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
                tee_pager_save_page(pmem, a);
        }

        TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
        pmem->pgidx = INVALID_PGIDX;
        pmem->area = NULL;
        if (area->type == AREA_TYPE_LOCK) {
                /* Move page to lock list */
                if (tee_pager_npages <= 0)
                        panic("running out of pages");
                tee_pager_npages--;
                set_npages();
                TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
        } else {
                /* move page to back */
                TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
        }

        return pmem;
}

static bool pager_update_permissions(struct tee_pager_area *area,
                        struct abort_info *ai, bool *handled)
{
        unsigned int pgidx = area_va2idx(area, ai->va);
        uint32_t attr;
        paddr_t pa;

        *handled = false;

        area_get_entry(area, pgidx, &pa, &attr);

        /* Not mapped */
        if (!(attr & TEE_MATTR_VALID_BLOCK))
                return false;

        /* Not readable, should not happen */
        if (abort_is_user_exception(ai)) {
                if (!(attr & TEE_MATTR_UR))
                        return true;
        } else {
                if (!(attr & TEE_MATTR_PR)) {
                        abort_print_error(ai);
                        panic();
                }
        }

        switch (core_mmu_get_fault_type(ai->fault_descr)) {
        case CORE_MMU_FAULT_TRANSLATION:
        case CORE_MMU_FAULT_READ_PERMISSION:
                if (ai->abort_type == ABORT_TYPE_PREFETCH) {
                        /* Check if attempting to execute from a NOX page */
                        if (abort_is_user_exception(ai)) {
                                if (!(attr & TEE_MATTR_UX))
                                        return true;
                        } else {
                                if (!(attr & TEE_MATTR_PX)) {
                                        abort_print_error(ai);
                                        panic();
                                }
                        }
                }
                /* Since the page is mapped now it's OK */
                break;
        case CORE_MMU_FAULT_WRITE_PERMISSION:
                /* Check if attempting to write to an RO page */
                if (abort_is_user_exception(ai)) {
                        if (!(area->flags & TEE_MATTR_UW))
                                return true;
                        if (!(attr & TEE_MATTR_UW)) {
                                FMSG("Dirty %p",
                                     (void *)(ai->va & ~SMALL_PAGE_MASK));
                                area_set_entry(area, pgidx, pa,
                                               get_area_mattr(area->flags));
                                /* TODO only invalidate entry above */
                                core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
                        }

                } else {
                        if (!(area->flags & TEE_MATTR_PW)) {
                                abort_print_error(ai);
                                panic();
                        }
                        if (!(attr & TEE_MATTR_PW)) {
                                FMSG("Dirty %p",
                                     (void *)(ai->va & ~SMALL_PAGE_MASK));
                                area_set_entry(area, pgidx, pa,
                                               get_area_mattr(area->flags));
                                /* TODO only invalidate entry above */
                                core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
                        }
                }
                /* Since permissions have been updated now it's OK */
                break;
        default:
                /* Some fault we can't deal with */
                if (abort_is_user_exception(ai))
                        return true;
                abort_print_error(ai);
                panic();
        }
        *handled = true;
        return true;
}

#ifdef CFG_TEE_CORE_DEBUG
static void stat_handle_fault(void)
{
        static size_t num_faults;
        static size_t min_npages = SIZE_MAX;
        static size_t total_min_npages = SIZE_MAX;

        num_faults++;
        if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
                DMSG("nfaults %zu npages %zu (min %zu)",
                     num_faults, tee_pager_npages, min_npages);
                min_npages = tee_pager_npages; /* reset */
        }
        if (tee_pager_npages < min_npages)
                min_npages = tee_pager_npages;
        if (tee_pager_npages < total_min_npages)
                total_min_npages = tee_pager_npages;
}
#else
static void stat_handle_fault(void)
{
}
#endif

bool tee_pager_handle_fault(struct abort_info *ai)
{
        struct tee_pager_area *area;
        vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
        uint32_t exceptions;
        bool ret;

#ifdef TEE_PAGER_DEBUG_PRINT
        abort_print(ai);
#endif

        /*
         * We're updating pages that can affect several active CPUs at a
         * time below. We end up here because a thread tries to access some
         * memory that isn't available. We have to be careful when making
         * that memory available as other threads may succeed in accessing
         * that address the moment after we've made it available.
         *
         * That means that we can't just map the memory and populate the
         * page, instead we use the aliased mapping to populate the page
         * and once everything is ready we map it.
         */
        exceptions = pager_lock();

        stat_handle_fault();

        /* check if the access is valid */
        if (abort_is_user_exception(ai)) {
                area = find_uta_area(ai->va);

        } else {
                area = find_area(&tee_pager_area_head, ai->va);
                if (!area)
                        area = find_uta_area(ai->va);
        }
        if (!area || !area->pgt) {
                ret = false;
                goto out;
        }

        if (!tee_pager_unhide_page(page_va)) {
                struct tee_pager_pmem *pmem = NULL;
                uint32_t attr;

                /*
                 * The page wasn't hidden, but some other core may have
                 * updated the table entry before we got here or we need
                 * to make a read-only page read-write (dirty).
                 */
                if (pager_update_permissions(area, ai, &ret)) {
                        /*
                         * Nothing more to do with the abort. The problem
                         * could already have been dealt with from another
                         * core, or if ret is false the TA will be panicked.
                         */
                        goto out;
                }

                pmem = tee_pager_get_page(area);
                if (!pmem) {
                        abort_print(ai);
                        panic();
                }

                /* load page code & data */
                tee_pager_load_page(area, page_va, pmem->va_alias);

                /*
                 * We've updated the page using the aliased mapping and
                 * some cache maintenance is now needed if it's an
                 * executable page.
                 *
                 * Since the d-cache is a Physically-indexed,
                 * physically-tagged (PIPT) cache we can clean the aliased
                 * address instead of the real virtual address.
                 *
                 * The i-cache can also be PIPT, but may be something else
                 * too. To keep it simple we invalidate the entire i-cache.
                 * As a future optimization we may invalidate only the
                 * aliased area if it's a PIPT cache, else the entire cache.
                 */
                if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
                        /*
                         * Doing these operations to LoUIS (Level of
                         * unification, Inner Shareable) would be enough
                         */
                        cache_op_inner(DCACHE_AREA_CLEAN, pmem->va_alias,
                                        SMALL_PAGE_SIZE);
                        cache_op_inner(ICACHE_INVALIDATE, NULL, 0);
                }

                pmem->area = area;
                pmem->pgidx = area_va2idx(area, ai->va);
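                /*
                 * The page is mapped read-only even in writable areas;
                 * the first write faults and pager_update_permissions()
                 * upgrades the mapping, so clean pages are tracked and
                 * not needlessly re-encrypted when evicted.
                 */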
                attr = get_area_mattr(area->flags) &
                        ~(TEE_MATTR_PW | TEE_MATTR_UW);
                area_set_entry(area, pmem->pgidx, get_pmem_pa(pmem), attr);
                pgt_inc_used_entries(area->pgt);

                FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
                     area_idx2va(area, pmem->pgidx), get_pmem_pa(pmem));

        }

        tee_pager_hide_pages();
        ret = true;
out:
        pager_unlock(exceptions);
        return ret;
}

void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
{
        struct core_mmu_table_info *ti = &tee_pager_tbl_info;
        size_t n;

        DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
             vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);

        /* setup memory */
        for (n = 0; n < npages; n++) {
                struct tee_pager_pmem *pmem;
                vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
                unsigned pgidx = core_mmu_va2idx(ti, va);
                paddr_t pa;
                uint32_t attr;

                /*
                 * Note that we can only support adding pages in the
                 * valid range of this table info, currently not a problem.
                 */
                core_mmu_get_entry(ti, pgidx, &pa, &attr);

                /* Ignore unmapped pages/blocks */
                if (!(attr & TEE_MATTR_VALID_BLOCK))
                        continue;

                pmem = malloc(sizeof(struct tee_pager_pmem));
                if (!pmem)
                        panic("out of mem");

                pmem->va_alias = pager_add_alias_page(pa);

                if (unmap) {
                        pmem->area = NULL;
                        pmem->pgidx = INVALID_PGIDX;
                        core_mmu_set_entry(ti, pgidx, 0, 0);
                        pgt_dec_used_entries(&pager_core_pgt);
                } else {
                        /*
                         * The page is still mapped, let's assign the area
                         * and update the protection bits accordingly.
                         */
                        pmem->area = find_area(&tee_pager_area_head, va);
                        assert(pmem->area->pgt == &pager_core_pgt);
                        pmem->pgidx = pgidx;
                        assert(pa == get_pmem_pa(pmem));
                        area_set_entry(pmem->area, pgidx, pa,
                                       get_area_mattr(pmem->area->flags));
                }

                tee_pager_npages++;
                incr_npages_all();
                set_npages();
                TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
        }

        /* Invalidate secure TLB */
        core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

#ifdef CFG_PAGED_USER_TA
static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
{
        struct pgt *p = pgt;

        while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
                p = SLIST_NEXT(p, link);
        return p;
}

void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
{
        struct tee_pager_area *area;
        struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);

        TAILQ_FOREACH(area, utc->areas, link) {
                if (!area->pgt)
                        area->pgt = find_pgt(pgt, area->base);
                else
                        assert(area->pgt == find_pgt(pgt, area->base));
                if (!area->pgt)
                        panic();
        }
}

static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)
{
        uint32_t attr;

        assert(pmem->area && pmem->area->pgt);

        area_get_entry(pmem->area, pmem->pgidx, NULL, &attr);
        area_set_entry(pmem->area, pmem->pgidx, 0, 0);
        tee_pager_save_page(pmem, attr);
        assert(pmem->area->pgt->num_used_entries);
        pmem->area->pgt->num_used_entries--;
        pmem->pgidx = INVALID_PGIDX;
        pmem->area = NULL;
}

void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
{
        struct tee_pager_pmem *pmem;
        struct tee_pager_area *area;
        uint32_t exceptions = pager_lock();

        if (!pgt->num_used_entries)
                goto out;

        TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
                if (!pmem->area || pmem->pgidx == INVALID_PGIDX)
                        continue;
                if (pmem->area->pgt == pgt)
                        pager_save_and_release_entry(pmem);
        }
        assert(!pgt->num_used_entries);

out:
        if (is_user_ta_ctx(pgt->ctx)) {
                TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
                        if (area->pgt == pgt)
                                area->pgt = NULL;
                }
        }

        pager_unlock(exceptions);
}
KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
#endif /*CFG_PAGED_USER_TA*/

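/*
 * Returns the physical pages fully covered by [addr, addr + size) to
 * the pool of pageable pages. The range must lie within a single pager
 * area; only pages of locked areas are tracked on
 * tee_pager_lock_pmem_head and thus actually released.
 */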
void tee_pager_release_phys(void *addr, size_t size)
{
        bool unmapped = false;
        vaddr_t va = (vaddr_t)addr;
        vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
        vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
        struct tee_pager_area *area;
        uint32_t exceptions;

        if (!size)
                return;

        area = find_area(&tee_pager_area_head, begin);
        if (!area ||
            area != find_area(&tee_pager_area_head, end - SMALL_PAGE_SIZE))
                panic();

        exceptions = pager_lock();

        for (va = begin; va < end; va += SMALL_PAGE_SIZE)
                unmapped |= tee_pager_release_one_phys(area, va);

        /* Invalidate secure TLB */
        if (unmapped)
                core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

        pager_unlock(exceptions);
}
KEEP_PAGER(tee_pager_release_phys);

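/*
 * Allocates zero-initialized read/write core memory from the pager's
 * virtual core space, pinned if TEE_MATTR_LOCKED is passed in flags,
 * pageable otherwise.
 */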
void *tee_pager_alloc(size_t size, uint32_t flags)
{
        tee_mm_entry_t *mm;
        uint32_t f = TEE_MATTR_PW | TEE_MATTR_PR | (flags & TEE_MATTR_LOCKED);

        if (!size)
                return NULL;

        mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
        if (!mm)
                return NULL;

        tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
                                f, NULL, NULL);

        return (void *)tee_mm_get_smem(mm);
}