/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <string.h>
#include <inttypes.h>

#include <utee_defines.h>
#include <sys/queue.h>

#include "tee_user_mem.h"
#include "utee_misc.h"
39 #ifdef CFG_NO_USER_MALLOC_GARBAGE
41 void *tee_user_mem_alloc(size_t len, uint32_t hint)
46 case TEE_MALLOC_FILL_ZERO:
47 case TEE_USER_MEM_HINT_NO_FILL_ZERO:
50 EMSG("Invalid alloc hint [%X]", (unsigned int)hint);
58 if (hint == TEE_MALLOC_FILL_ZERO)
60 #if (CFG_TEE_CORE_USER_MEM_DEBUG == 1)
61 if (hint == (typeof(hint)) TEE_USER_MEM_HINT_NO_FILL_ZERO)
/*
 * Untracked realloc (CFG_NO_USER_MALLOC_GARBAGE build): no bookkeeping is
 * performed in this configuration, so defer directly to the raw allocator.
 */
void *tee_user_mem_realloc(void *buffer, size_t len)
{
	return utee_realloc(buffer, len);
}
/*
 * Untracked free: hand the buffer straight back to the raw allocator.
 * NOTE(review): bodies were elided in the extract; reconstructed as the
 * obvious no-tracking stubs.
 */
void tee_user_mem_free(void *buffer)
{
	utee_free(buffer);
}

/* No tracking in this configuration: marking the heap is a no-op. */
void tee_user_mem_mark_heap(void)
{
}

/* No tracking in this configuration: never report a leak. */
size_t tee_user_mem_check_heap(void)
{
	return 0;
}
86 #else /* CFG_NO_USER_MALLOC_GARBAGE */
/*
 * Manage and track the memory allocations in the libc heap on the user
 * side (TA). Every allocation is registered against the current TA, and a
 * garbage-collection API is provided to delete all allocations of a given
 * TA.
 */
/*
 * ARTIST is a magic sentinel returned for zero-size allocations so that an
 * allocation/free pair of size 0 behaves consistently. It is never
 * dereferenced and is recognized explicitly by tee_user_mem_free() and
 * tee_user_mem_realloc().
 */
static const void *ARTIST = (void *)0x10;
/*
 * Linked-list element tracking one live allocation. The element is placed
 * at the head of the raw allocation; the user-visible payload follows it
 * (see buf_addr()/elem_addr()).
 *
 * NOTE(review): the 'len' and 'hint' members were elided in the extract;
 * they are reconstructed from their uses (e->len, e->hint) elsewhere in
 * this file.
 */
struct user_mem_elem {
	TAILQ_ENTRY(user_mem_elem) link;	/* tracking-list linkage */
	size_t len;	/* total size: header + payload + canary */
	uint32_t hint;	/* allocation hint, reused by realloc() */
};

/* Global list of live allocations, statically initialized empty. */
TAILQ_HEAD(user_mem_head, user_mem_elem) user_mem_head =
	TAILQ_HEAD_INITIALIZER(user_mem_head);
#if (CFG_TEE_CORE_USER_MEM_DEBUG == 1)
/*
 * Bookkeeping counters for the whole tracked heap. Field types are
 * grounded by the IMSG() format strings used when dumping them.
 */
struct tee_user_mem_stats {
	int nb_alloc;	/* number of live allocations */
	size_t size;	/* total number of tracked bytes */
};

static void tee_user_mem_status(struct tee_user_mem_stats *stats);

/* Extra size of memory to add canary line check */
static const size_t CANARY_LINE_SIZE = 1;
#else
/* No canary byte when debug checking is disabled (zero-initialized). */
static const size_t CANARY_LINE_SIZE;
#endif
127 * Accessors from an element of the list and its attribute.
129 static inline void *buf_addr(const struct user_mem_elem *e)
131 return (uint8_t *)e + sizeof(struct user_mem_elem);
134 static inline size_t buf_size(const struct user_mem_elem *e)
136 return e->len - sizeof(struct user_mem_elem) - CANARY_LINE_SIZE;
139 static inline void *elem_addr(const void *buffer)
141 return (uint8_t *)buffer - sizeof(struct user_mem_elem);
145 * Check if a given buffer address has been allocated with this tool.
147 static int is_buffer_valid(void *buffer)
149 struct user_mem_elem *e;
151 TAILQ_FOREACH(e, &user_mem_head, link) {
152 if (buf_addr(e) == buffer)
158 #if (CFG_TEE_CORE_USER_MEM_DEBUG == 1)
160 * Common print of an element.
162 #if (TRACE_LEVEL > 0)
163 static void print_buf(int tl, const char *func, int line, const char *prefix,
164 const struct user_mem_elem *e)
166 trace_printf(NULL, 0, tl, true,
167 "%s:%d: %slink:[%p], buf:[%p:%zu]\n",
168 func, line, prefix, (void *)e, buf_addr(e), buf_size(e));
171 #define PB(trace_level, prefix, elem) { print_buf(trace_level, __func__, \
172 __LINE__, prefix, elem); }
174 #define PB(trace_level, prefix, elem) (void)0
175 #endif /* TRACE_LEVEL */
178 * Heap mark to track leak.
180 * Can't use OS21 partition api to be compatible with TZ.
182 * Can't use generic mallinfo to dump the libc heap because the tee core
183 * use also this heap.
185 * So use a simple static var which is updated on tee_user_mem_ operations.
187 static size_t heap_level;
190 * global stats to summarize memory activities cross TA's.
192 static struct tee_user_mem_stats global_stats;
194 static void heap_inc(size_t size)
199 global_stats.nb_alloc++;
200 global_stats.size += size;
201 OUTMSG("%zu", global_stats.size);
204 static void heap_dec(size_t size)
206 INMSG("%zu %zu", heap_level, size);
209 global_stats.nb_alloc--;
210 global_stats.size -= size;
211 OUTMSG("%zu", global_stats.size);
215 * Check integrity of the buffer and the list.
217 static int check_elem_end(struct user_mem_elem *e)
219 uint8_t *cp = (uint8_t *)e;
222 * The following check detects storing off the end of the allocated
223 * space in the buffer by comparing the end of buffer checksum with the
224 * address of the buffer.
226 if ((cp[e->len - CANARY_LINE_SIZE] !=
227 ((((uintptr_t) cp) & 0xFF) ^ 0xC5))) {
228 PB(TRACE_ERROR, "Corrupted: ", e);
235 static int check_elem(struct user_mem_elem *ap)
237 struct user_mem_elem *e;
239 /* Validate queue links */
243 if ((uintptr_t)ap & 0x3) {
244 EMSG("corrupted allocations");
248 e = TAILQ_NEXT(ap, link);
249 if (e != NULL && TAILQ_PREV(e, user_mem_head, link) != ap) {
250 PB(TRACE_ERROR, "Orphaned: ", e);
254 e = TAILQ_PREV(ap, user_mem_head, link);
255 if (e != NULL && TAILQ_NEXT(e, link) != ap) {
256 PB(TRACE_ERROR, "Orphaned: ", e);
260 return check_elem_end(ap);
263 /* In debug mode, trap PC element are corrupted. */
264 static int is_mem_coherent(void)
266 struct user_mem_elem *e;
268 TAILQ_FOREACH(e, &user_mem_head, link) {
269 if (!check_elem(e)) {
277 #else /* CFG_TEE_CORE_USER_MEM_DEBUG */
278 static void heap_inc(size_t size __unused)
282 static void heap_dec(size_t size __unused)
286 #define PB(trace_level, prefix, elem) do {} while (0)
287 #endif /* CFG_TEE_CORE_USER_MEM_DEBUG */
294 * Allocate buffer, enqueing on the orphaned buffer tracking list.
296 void *tee_user_mem_alloc(size_t len, uint32_t hint)
301 len + sizeof(struct user_mem_elem) + CANARY_LINE_SIZE;
304 INMSG("%zu 0x%" PRIx32, len, hint);
312 OUTMSG("%p", ARTIST);
313 return (void *)ARTIST;
318 case TEE_MALLOC_FILL_ZERO:
319 case TEE_USER_MEM_HINT_NO_FILL_ZERO:
322 EMSG("Invalid alloc hint [0x%" PRIx32 "]", hint);
327 cp = utee_malloc(total_len);
329 struct user_mem_elem *e = (struct user_mem_elem *)(void *)cp;
334 /* Enqueue buffer on allocated list */
335 TAILQ_INSERT_TAIL(&user_mem_head, e, link);
337 #if (CFG_TEE_CORE_USER_MEM_DEBUG == 1)
338 /* Emplace end-clobber detector at end of buffer */
339 cp[total_len - CANARY_LINE_SIZE] =
340 (((uintptr_t) cp) & 0xFF) ^ 0xC5;
343 PB(TRACE_DEBUG, "Allocate: ", (void *)e);
347 if (hint == TEE_MALLOC_FILL_ZERO)
349 #if (CFG_TEE_CORE_USER_MEM_DEBUG == 1)
350 else if (hint == (typeof(hint)) TEE_USER_MEM_HINT_NO_FILL_ZERO)
351 /* Fill buffer with init pattern */
352 memset(buf, 0xBB, len);
361 * Adjust the size of a previously allocated buffer. Because of the need to
362 * maintain our control storage, tee_user_mem_realloc() must always allocate a
363 * new block and copy the data in the old block. This may result in programs
364 * which make heavy use of realloc() running much slower than normally.
366 void *tee_user_mem_realloc(void *buffer, size_t len)
370 struct user_mem_elem *e;
372 INMSG("[%p:%d]", buffer, (int)len);
379 /* If the old block pointer
381 * - or was allocated with a zero size,
382 * - or invalid buffer
383 * treat realloc() as a malloc(). */
384 if (buffer == NULL || buffer == ARTIST || !is_buffer_valid(buffer)) {
385 buf = tee_user_mem_alloc(len, DEFAULT_TEE_MALLOC_HINT);
391 * If the old and new sizes are the same, be a nice guy and just return
392 * the buffer passed in.
394 e = (struct user_mem_elem *)elem_addr(buffer);
397 OUTMSG("[%p]", buffer);
402 * Sizes differ. Allocate a new buffer of the requested size. If we
403 * can't obtain such a buffer, return NULL from realloc() and leave the
404 * buffer in ptr intact.
406 buf = tee_user_mem_alloc(len, e->hint);
408 memcpy(buf, buffer, MIN(len, olen));
410 /* All done. Free and dechain the original buffer. */
411 tee_user_mem_free(buffer);
419 * Update free pool availability. free is never called except through this
420 * interface. free(x) is defined to generate a call to this routine.
422 void tee_user_mem_free(void *buffer)
425 struct user_mem_elem *e;
427 INMSG("[%p]", buffer);
429 /* It is OK to free NULL */
430 if (buffer == NULL || buffer == ARTIST)
433 /* Check if the buffer is valid */
434 if (!is_buffer_valid(buffer)) {
435 EMSG("unknown freed buffer [%p]", buffer);
439 cp = elem_addr(buffer);
440 e = (struct user_mem_elem *)(void *)cp;
442 PB(TRACE_DEBUG, "Free: ", (void *)e);
444 #if (CFG_TEE_CORE_USER_MEM_DEBUG == 1)
445 if (!check_elem(e)) {
446 EMSG("corrupted allocation");
451 TAILQ_REMOVE(&user_mem_head, e, link);
455 #if (CFG_TEE_CORE_USER_MEM_DEBUG == 1)
457 * Now we wipe the contents of the just-released buffer with "designer
458 * garbage" (Duff Kurland's phrase) of alternating bits. This is
459 * intended to ruin the day for any miscreant who attempts to access
460 * data through a pointer into storage that's been previously released.
462 memset(cp, 0xAA, e->len);
470 #if (CFG_TEE_CORE_USER_MEM_DEBUG == 1)
472 * Accessors to mark the heap.
474 void tee_user_mem_mark_heap(void)
477 /* Reset the marker */
483 * Accessors to check the heap and the whole list.
484 * Return 0 means no leak and link list is valid.
485 * Return >0 return nb bytes of leak.
487 size_t tee_user_mem_check_heap(void)
490 INMSG("%zu", heap_level);
493 EMSG("ta heap has changed of [%zu]", heap_level);
494 OUTMSG("%zu", heap_level);
498 res = !is_mem_coherent();
505 * Dump the stats and elements of the memory activity.
507 void tee_user_mem_status(struct tee_user_mem_stats *stats)
509 struct user_mem_elem *e;
511 memcpy(stats, &global_stats, sizeof(struct tee_user_mem_stats));
513 if (global_stats.nb_alloc > 0) {
514 IMSG("Nb alloc:\t[%d]", global_stats.nb_alloc);
515 IMSG("Size:\t[%zu]", global_stats.size);
518 TAILQ_FOREACH(e, &user_mem_head, link) {
519 PB(TRACE_ERROR, "", e);
523 void tee_user_mem_mark_heap(void)
527 size_t tee_user_mem_check_heap(void)
531 #endif /* CFG_TEE_CORE_USER_MEM_DEBUG */
534 * Free memory allocated from a specific TA.
536 void tee_user_mem_garbage(void)
538 #if (CFG_TEE_CORE_USER_MEM_DEBUG == 1)
539 tee_user_mem_status(NULL);
542 while (TAILQ_FIRST(&user_mem_head) != NULL)
543 tee_user_mem_free(buf_addr(TAILQ_FIRST(&user_mem_head)));
546 #endif /* CFG_NO_USER_MALLOC_GARBAGE */