/*
 * Copyright (c) 2016-2017, Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <keep.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_mmu.h>
#include <mm/tee_pager.h>
#include <optee_msg.h>
#include <sm/optee_smc.h>
#include <stdlib.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

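/*
 * mobj covering the secure DDR range used for TA RAM; expected to be
 * assigned during core memory-map initialization elsewhere in the core.
 */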
struct mobj *mobj_sec_ddr;

/*
 * mobj_phys implementation
 */

struct mobj_phys {
	struct mobj mobj;
	enum buf_is_attr battr;
	uint32_t cattr; /* Defined by TEE_MATTR_CACHE_* in tee_mmu_types.h */
	vaddr_t va;
	paddr_t pa;
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj);

static void *mobj_phys_get_va(struct mobj *mobj, size_t offset)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!moph->va)
		return NULL;

	return (void *)(moph->va + offset);
}

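/*
 * Return the physical address at @offs into the object, optionally rounded
 * down to the requested granule (small page or translation table directory
 * size).
 */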
static TEE_Result mobj_phys_get_pa(struct mobj *mobj, size_t offs,
				   size_t granule, paddr_t *pa)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	paddr_t p;

	if (!pa)
		return TEE_ERROR_GENERIC;

	p = moph->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
/* ifndef due to an asserting AArch64 linker */
#ifndef ARM64
KEEP_PAGER(mobj_phys_get_pa);
#endif

static TEE_Result mobj_phys_get_cattr(struct mobj *mobj, uint32_t *cattr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!cattr)
		return TEE_ERROR_GENERIC;

	*cattr = moph->cattr;
	return TEE_SUCCESS;
}

static bool mobj_phys_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	enum buf_is_attr a = moph->battr;

	switch (attr) {
	case CORE_MEM_SEC:
		return a == CORE_MEM_SEC || a == CORE_MEM_TEE_RAM ||
		       a == CORE_MEM_TA_RAM || a == CORE_MEM_SDP_MEM;
	case CORE_MEM_NON_SEC:
		return a == CORE_MEM_NSEC_SHM;
	case CORE_MEM_TEE_RAM:
	case CORE_MEM_TA_RAM:
	case CORE_MEM_NSEC_SHM:
	case CORE_MEM_SDP_MEM:
		return attr == a;
	default:
		return false;
	}
}

static void mobj_phys_free(struct mobj *mobj)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	free(moph);
}

static const struct mobj_ops mobj_phys_ops __rodata_unpaged = {
	.get_va = mobj_phys_get_va,
	.get_pa = mobj_phys_get_pa,
	.get_cattr = mobj_phys_get_cattr,
	.matches = mobj_phys_matches,
	.free = mobj_phys_free,
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_phys_ops);
	return container_of(mobj, struct mobj_phys, mobj);
}

struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t cattr,
			     enum buf_is_attr battr)
{
	struct mobj_phys *moph;
	enum teecore_memtypes area_type;
	void *va;

	if ((pa & CORE_MMU_USER_PARAM_MASK) ||
	    (size & CORE_MMU_USER_PARAM_MASK)) {
		DMSG("Expect %#x alignment", CORE_MMU_USER_PARAM_SIZE);
		return NULL;
	}

	switch (battr) {
	case CORE_MEM_TEE_RAM:
		area_type = MEM_AREA_TEE_RAM;
		break;
	case CORE_MEM_TA_RAM:
		area_type = MEM_AREA_TA_RAM;
		break;
	case CORE_MEM_NSEC_SHM:
		area_type = MEM_AREA_NSEC_SHM;
		break;
	case CORE_MEM_SDP_MEM:
		area_type = MEM_AREA_SDP_MEM;
		break;
	default:
		DMSG("can't allocate with specified attribute");
		return NULL;
	}

	/* Only SDP memory may not have a virtual address */
	va = phys_to_virt(pa, area_type);
	if (!va && battr != CORE_MEM_SDP_MEM)
		return NULL;

	moph = calloc(1, sizeof(*moph));
	if (!moph)
		return NULL;

	moph->battr = battr;
	moph->cattr = cattr;
	moph->mobj.size = size;
	moph->mobj.ops = &mobj_phys_ops;
	moph->pa = pa;
	moph->va = (vaddr_t)va;

	return &moph->mobj;
}

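/*
 * A minimal usage sketch (illustrative, assuming @pa points at a page
 * already mapped as secure TEE RAM; TEE_MATTR_CACHE_CACHED is from
 * tee_mmu_types.h):
 *
 *	struct mobj *m = mobj_phys_alloc(pa, SMALL_PAGE_SIZE,
 *					 TEE_MATTR_CACHE_CACHED,
 *					 CORE_MEM_TEE_RAM);
 *	if (m) {
 *		void *va = mobj_get_va(m, 0);
 *		...
 *		mobj_free(m);
 *	}
 */
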
/*
 * mobj_virt implementation
 */

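/*
 * mobj_virt is a pseudo memory object where the offset passed to get_va()
 * is interpreted directly as a virtual address; it covers the whole
 * address space (size = SIZE_MAX).
 */
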
static void mobj_virt_assert_type(struct mobj *mobj);

static void *mobj_virt_get_va(struct mobj *mobj, size_t offset)
{
	mobj_virt_assert_type(mobj);

	return (void *)(vaddr_t)offset;
}

static const struct mobj_ops mobj_virt_ops __rodata_unpaged = {
	.get_va = mobj_virt_get_va,
};

static void mobj_virt_assert_type(struct mobj *mobj __maybe_unused)
{
	assert(mobj->ops == &mobj_virt_ops);
}

struct mobj mobj_virt = { .ops = &mobj_virt_ops, .size = SIZE_MAX };

/*
 * mobj_mm implementation
 */

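/*
 * A mobj_mm carves a sub-range out of a parent mobj: the range is reserved
 * from a tee_mm pool and every operation is forwarded to the parent at the
 * corresponding offset.
 */
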
struct mobj_mm {
	tee_mm_entry_t *mm;
	struct mobj *parent_mobj;
	struct mobj mobj;
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj);

static size_t mobj_mm_offs(struct mobj *mobj, size_t offs)
{
	tee_mm_entry_t *mm = to_mobj_mm(mobj)->mm;

	return (mm->offset << mm->pool->shift) + offs;
}

static void *mobj_mm_get_va(struct mobj *mobj, size_t offs)
{
	return mobj_get_va(to_mobj_mm(mobj)->parent_mobj,
			   mobj_mm_offs(mobj, offs));
}

static TEE_Result mobj_mm_get_pa(struct mobj *mobj, size_t offs,
				 size_t granule, paddr_t *pa)
{
	return mobj_get_pa(to_mobj_mm(mobj)->parent_mobj,
			   mobj_mm_offs(mobj, offs), granule, pa);
}
/* ifndef due to an asserting AArch64 linker */
#ifndef ARM64
KEEP_PAGER(mobj_mm_get_pa);
#endif

static TEE_Result mobj_mm_get_cattr(struct mobj *mobj, uint32_t *cattr)
{
	return mobj_get_cattr(to_mobj_mm(mobj)->parent_mobj, cattr);
}

static bool mobj_mm_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	return mobj_matches(to_mobj_mm(mobj)->parent_mobj, attr);
}

static void mobj_mm_free(struct mobj *mobj)
{
	struct mobj_mm *m = to_mobj_mm(mobj);

	tee_mm_free(m->mm);
	free(m);
}

static const struct mobj_ops mobj_mm_ops __rodata_unpaged = {
	.get_va = mobj_mm_get_va,
	.get_pa = mobj_mm_get_pa,
	.get_cattr = mobj_mm_get_cattr,
	.matches = mobj_mm_matches,
	.free = mobj_mm_free,
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_mm_ops);
	return container_of(mobj, struct mobj_mm, mobj);
}

struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size,
			   tee_mm_pool_t *pool)
{
	struct mobj_mm *m = calloc(1, sizeof(*m));

	if (!m)
		return NULL;

	m->mm = tee_mm_alloc(pool, size);
	if (!m->mm) {
		free(m);
		return NULL;
	}

	m->parent_mobj = mobj_parent;
	m->mobj.size = size;
	m->mobj.ops = &mobj_mm_ops;

	return &m->mobj;
}

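/*
 * A minimal usage sketch (illustrative, assuming tee_mm_sec_ddr is the
 * tee_mm pool tracking the range covered by mobj_sec_ddr):
 *
 *	struct mobj *sub = mobj_mm_alloc(mobj_sec_ddr, SMALL_PAGE_SIZE,
 *					 &tee_mm_sec_ddr);
 */
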
#ifdef CFG_PAGED_USER_TA
/*
 * mobj_paged implementation
 */

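/*
 * A paged mobj only carries a size and a free() hook: its backing pages
 * are provided on demand by the pager rather than by a fixed physical
 * range.
 */
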
static void mobj_paged_free(struct mobj *mobj);

static const struct mobj_ops mobj_paged_ops __rodata_unpaged = {
	.free = mobj_paged_free,
};

static void mobj_paged_free(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_paged_ops);
	free(mobj);
}

struct mobj *mobj_paged_alloc(size_t size)
{
	struct mobj *mobj = calloc(1, sizeof(*mobj));

	if (mobj) {
		mobj->size = size;
		mobj->ops = &mobj_paged_ops;
	}
	return mobj;
}

/*
 * mobj_seccpy_shm implementation
 */

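/*
 * A seccpy_shm mobj is pager-backed secure memory mapped read/write into a
 * single user TA context at a time; update_mapping() migrates the mapping
 * when the buffer is handed over to another context.
 */
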
struct mobj_seccpy_shm {
	struct user_ta_ctx *utc;
	vaddr_t va;
	size_t pgdir_offset;
	struct mobj mobj;
};

static bool __maybe_unused mobj_is_seccpy_shm(struct mobj *mobj);

static struct mobj_seccpy_shm *to_mobj_seccpy_shm(struct mobj *mobj)
{
	assert(mobj_is_seccpy_shm(mobj));
	return container_of(mobj, struct mobj_seccpy_shm, mobj);
}

static void *mobj_seccpy_shm_get_va(struct mobj *mobj, size_t offs)
{
	struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);

	/* The buffer is only mapped in the owning TA's context */
	if (&m->utc->ctx != thread_get_tsd()->ctx)
		return NULL;

	if (offs >= mobj->size)
		return NULL;
	return (void *)(m->va + offs);
}

static bool mobj_seccpy_shm_matches(struct mobj *mobj __maybe_unused,
				    enum buf_is_attr attr)
{
	assert(mobj_is_seccpy_shm(mobj));

	return attr == CORE_MEM_SEC || attr == CORE_MEM_TEE_RAM;
}

static void mobj_seccpy_shm_free(struct mobj *mobj)
{
	struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);

	tee_pager_rem_uta_region(m->utc, m->va, mobj->size);
	tee_mmu_rem_rwmem(m->utc, mobj, m->va);
	free(m);
}

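/*
 * Move the single read/write mapping of the buffer from its current owner
 * to context @utc at virtual address @va, transferring the affected page
 * tables along with it.
 */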
static void mobj_seccpy_shm_update_mapping(struct mobj *mobj,
					   struct user_ta_ctx *utc, vaddr_t va)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);
	size_t s;

	if (utc == m->utc && va == m->va)
		return;

	s = ROUNDUP(mobj->size, SMALL_PAGE_SIZE);
	pgt_transfer(&tsd->pgt_cache, &m->utc->ctx, m->va, &utc->ctx, va, s);

	m->va = va;
	m->utc = utc;
}

static const struct mobj_ops mobj_seccpy_shm_ops __rodata_unpaged = {
	.get_va = mobj_seccpy_shm_get_va,
	.matches = mobj_seccpy_shm_matches,
	.free = mobj_seccpy_shm_free,
	.update_mapping = mobj_seccpy_shm_update_mapping,
};

static bool mobj_is_seccpy_shm(struct mobj *mobj)
{
	return mobj && mobj->ops == &mobj_seccpy_shm_ops;
}

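/*
 * Allocate pager-backed read/write memory and map it into the current
 * user TA context; must be called from a thread running a user TA.
 */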
struct mobj *mobj_seccpy_shm_alloc(size_t size)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct mobj_seccpy_shm *m;
	struct user_ta_ctx *utc;
	vaddr_t va = 0;

	if (!is_user_ta_ctx(tsd->ctx))
		return NULL;
	utc = to_user_ta_ctx(tsd->ctx);

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_seccpy_shm_ops;

	if (tee_mmu_add_rwmem(utc, &m->mobj, -1, &va) != TEE_SUCCESS)
		goto bad;

	if (!tee_pager_add_uta_area(utc, va, size))
		goto bad;

	m->va = va;
	m->pgdir_offset = va & CORE_MMU_PGDIR_MASK;
	m->utc = to_user_ta_ctx(tsd->ctx);
	return &m->mobj;
bad:
	if (va)
		tee_mmu_rem_rwmem(utc, &m->mobj, va);
	free(m);
	return NULL;
}

bool mobj_is_paged(struct mobj *mobj)
{
	return mobj->ops == &mobj_paged_ops ||
	       mobj->ops == &mobj_seccpy_shm_ops;
}
#endif /*CFG_PAGED_USER_TA*/