2 * Copyright (c) 2014, STMicroelectronics International N.V.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
34 /* #define BGET_ENABLE_ALL_OPTIONS */
35 #ifdef BGET_ENABLE_OPTION
36 #define TestProg 20000 /* Generate built-in test program
37 if defined. The value specifies
38 how many buffer allocation attempts
39 the test program should make. */
49 /* Buffer allocation size quantum:
50 all buffers allocated are a
51 multiple of this size. This
52 MUST be a power of two. */
54 #ifdef BGET_ENABLE_OPTION
55 #define BufDump 1 /* Define this symbol to enable the
56 bpoold() function which dumps the
57 buffers in a buffer pool. */
59 #define BufValid 1 /* Define this symbol to enable the
60 bpoolv() function for validating
63 #define DumpData 1 /* Define this symbol to enable the
64 bufdump() function which allows
65 dumping the contents of an allocated
68 #define BufStats 1 /* Define this symbol to enable the
69 bstats() function which calculates
70 the total free space in the buffer
71 pool, the largest available
72 buffer, and the total space
73 currently allocated. */
75 #define FreeWipe 1 /* Wipe free buffers to a guaranteed
76 pattern of garbage to trip up
77 miscreants who attempt to use
78 pointers into released buffers. */
80 #define BestFit 1 /* Use a best fit algorithm when
81 searching for space for an
82 allocation request. This uses
83 memory more efficiently, but
84 allocation will be much slower. */
86 #define BECtl 1 /* Define this symbol to enable the
87 bectl() function for automatic
88 pool space control. */
102 #include <compiler.h>
110 #if defined(__KERNEL__)
111 /* Compiling for TEE Core */
112 #include <kernel/asan.h>
113 #include <kernel/thread.h>
114 #include <kernel/spinlock.h>
/*
 * Heap locking and ASan tagging primitives, selected at build time.
 * Kernel (__KERNEL__) build: mask IRQ/FIQ exceptions and take the global
 * heap spinlock so the allocator is safe against both other cores and
 * interrupt context on this core. TA build: single-threaded, so the
 * lock/unlock and ASan tag functions are no-op stubs.
 */
116 static uint32_t malloc_lock(void)
/* Mask both IRQ and FIQ before spinning; returns the previous mask */
120 exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ | THREAD_EXCP_FIQ);
121 cpu_spin_lock(&__malloc_spinlock);
/* Release in reverse order: drop the spinlock, then restore exceptions */
125 static void malloc_unlock(uint32_t exceptions)
127 cpu_spin_unlock(&__malloc_spinlock);
128 thread_unmask_exceptions(exceptions);
/* Mark [buf, buf + len) as freed heap memory for ASan */
131 static void tag_asan_free(void *buf, size_t len)
133 asan_tag_heap_free(buf, (uint8_t *)buf + len);
/* Mark [buf, buf + len) as valid, accessible memory for ASan */
136 static void tag_asan_alloced(void *buf, size_t len)
138 asan_tag_access(buf, (uint8_t *)buf + len);
142 /* Compiling for TA */
/* TA variants: no locking or ASan instrumentation needed */
143 static uint32_t malloc_lock(void)
148 static void malloc_unlock(uint32_t exceptions __unused)
152 static void tag_asan_free(void *buf __unused, size_t len __unused)
156 static void tag_asan_alloced(void *buf __unused, size_t len __unused)
161 #include "bget.c" /* this is ugly, but this is bget */
/* Registered heap pools (grown via malloc_add_pool()) and their count */
168 static struct malloc_pool *malloc_pool;
169 static size_t malloc_pool_len;
/* Allocation statistics, only maintained when BufStats is enabled */
173 static struct malloc_stats mstats;
/*
 * Called after every raw allocation attempt: tracks the high-water mark
 * of allocated bytes and records failures (p presumably NULL on failure;
 * the branch is elided in this view — TODO confirm against full source).
 */
175 static void raw_malloc_return_hook(void *p, size_t requested_size)
177 if (totalloc > mstats.max_allocated)
178 mstats.max_allocated = totalloc;
181 mstats.num_alloc_fail++;
182 if (requested_size > mstats.biggest_alloc_fail) {
183 mstats.biggest_alloc_fail = requested_size;
184 mstats.biggest_alloc_fail_used = totalloc;
/*
 * Reset all statistics counters under the heap lock.
 * NOTE(review): uses "unsigned int" for the exception mask while every
 * sibling uses uint32_t — harmless, but inconsistent; consider unifying.
 */
189 void malloc_reset_stats(void)
191 unsigned int exceptions = malloc_lock();
193 mstats.max_allocated = 0;
194 mstats.num_alloc_fail = 0;
195 mstats.biggest_alloc_fail = 0;
196 mstats.biggest_alloc_fail_used = 0;
197 malloc_unlock(exceptions);
/* Snapshot the stats; "allocated" is filled from the live totalloc */
200 void malloc_get_stats(struct malloc_stats *stats)
202 uint32_t exceptions = malloc_lock();
204 memcpy(stats, &mstats, sizeof(*stats));
205 stats->allocated = totalloc;
206 malloc_unlock(exceptions);
/* No-op hook when BufStats is disabled */
211 static void raw_malloc_return_hook(void *p __unused, size_t requested_size __unused)
215 #endif /* BufStats */
/*
 * Validate every registered pool with bget's bpoolv() (BufValid build);
 * the second definition below is the no-op variant for other builds.
 */
218 static void raw_malloc_validate_pools(void)
222 for (n = 0; n < malloc_pool_len; n++)
223 bpoolv(malloc_pool[n].buf);
/* No-op variant when buffer validation is compiled out */
226 static void raw_malloc_validate_pools(void)
/*
 * Cursor for walking all buffers in all pools: next_buf points at the
 * next bget buffer header, pool_idx selects the pool in malloc_pool[].
 */
231 struct bpool_iterator {
232 struct bfhead *next_buf;
/* Start iteration at the first buffer of the first pool */
236 static void bpool_foreach_iterator_init(struct bpool_iterator *iterator)
238 iterator->pool_idx = 0;
239 iterator->next_buf = BFH(malloc_pool[0].buf);
/*
 * Step the iterator over one buffer in the current pool. On success
 * reports the payload pointer, its length and whether the buffer is
 * free; returns false when the end of the pool is reached (return
 * statements are elided in this view — TODO confirm).
 * In bget a negative bsize means "allocated", positive means "free".
 */
242 static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
243 size_t *len, bool *isfree)
245 struct bfhead *b = iterator->next_buf;
246 bufsize bs = b->bh.bsize;
252 /* Allocated buffer */
260 /* Assert that the free list links are intact */
261 assert(b->ql.blink->ql.flink == b);
262 assert(b->ql.flink->ql.blink == b);
/* Payload starts right after the plain (allocated) buffer header */
265 *buf = (uint8_t *)b + sizeof(struct bhead);
266 *len = bs - sizeof(struct bhead);
/* Advance to the next buffer header in this pool */
268 iterator->next_buf = BFH((uint8_t *)b + bs);
/*
 * Yield the next *allocated* payload across all pools; skips free
 * buffers and hops to the next pool when the current one is exhausted.
 */
272 static bool bpool_foreach(struct bpool_iterator *iterator, void **buf)
278 if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
284 if ((iterator->pool_idx + 1) >= malloc_pool_len)
287 iterator->pool_idx++;
288 iterator->next_buf = BFH(malloc_pool[iterator->pool_idx].buf);
292 /* Convenience macro for looping over all allocated buffers */
293 #define BPOOL_FOREACH(iterator, bp) \
294 for (bpool_foreach_iterator_init((iterator)); \
295 bpool_foreach((iterator), (bp));)
/*
 * Core allocation: returns hdr_size + ftr_size + pl_size bytes from
 * bget. hdr/ftr sizes carry the mdbg debug header/footer (0 in the
 * non-debug build). Caller must hold the heap lock.
 * NOTE(review): no visible overflow check on the three-way sum here
 * (unlike raw_calloc) — TODO confirm against full source.
 */
297 static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size)
300 size_t s = hdr_size + ftr_size + pl_size;
303 * Make sure that malloc has correct alignment of returned buffers.
304 * The assumption is that uintptr_t will be as wide as the largest
305 * required alignment of any type.
307 COMPILE_TIME_ASSERT(SizeQuant >= sizeof(uintptr_t));
309 raw_malloc_validate_pools();
315 /* BGET doesn't like 0 sized allocations */
321 raw_malloc_return_hook(ptr, pl_size);
/* Release a buffer back to bget; caller must hold the heap lock */
326 static void raw_free(void *ptr)
328 raw_malloc_validate_pools();
334 static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
337 size_t s = hdr_size + ftr_size + pl_nmemb * pl_size;
340 raw_malloc_validate_pools();
343 if (s < pl_nmemb || s < pl_size)
346 /* BGET doesn't like 0 sized allocations */
352 raw_malloc_return_hook(ptr, pl_nmemb * pl_size);
/*
 * Resize an allocation to hdr_size + ftr_size + pl_size bytes (bget
 * bgetr() semantics presumably in the elided body — TODO confirm).
 * Caller must hold the heap lock.
 * NOTE(review): like raw_malloc, no visible overflow check on the sum.
 */
357 static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
360 size_t s = hdr_size + ftr_size + pl_size;
367 raw_malloc_validate_pools();
369 /* BGET doesn't like 0 sized allocations */
375 raw_malloc_return_hook(p, pl_size);
/*
 * Turn [bf, bf + size) into a free bget buffer sitting immediately
 * before the allocated block bn, and link it at the tail of the global
 * free list. Used by brel_before()/brel_after() when carving a free
 * fragment out of an over-sized allocation.
 */
380 static void create_free_block(struct bfhead *bf, bufsize size, struct bhead *bn)
382 assert(BH((char *)bf + size) == bn);
383 assert(bn->bsize < 0); /* Next block should be allocated */
384 /* Next block shouldn't already have free block in front */
385 assert(bn->prevfree == 0);
387 /* Create the free buf header */
391 /* Update next block to point to the new free buf header */
394 /* Insert the free buffer on the free list */
/* Sanity: the free list must be intact before we splice into it */
395 assert(freelist.ql.blink->ql.flink == &freelist);
396 assert(freelist.ql.flink->ql.blink == &freelist);
/* Splice bf in as the last element (between old tail and freelist) */
397 bf->ql.flink = &freelist;
398 bf->ql.blink = freelist.ql.blink;
399 freelist.ql.blink = bf;
400 bf->ql.blink->ql.flink = bf;
/*
 * Release the front part [orig_buf, new_buf) of an allocated buffer so
 * that the allocation effectively starts at new_buf. Used by
 * raw_memalign() to trim the unaligned head of an over-allocated
 * buffer. The freed head is either merged into a preceding free block
 * or turned into a new free block via create_free_block().
 */
403 static void brel_before(char *orig_buf, char *new_buf)
410 assert(orig_buf < new_buf);
411 /* There has to be room for the freebuf header */
412 size = (bufsize)(new_buf - orig_buf);
413 assert(size >= (SizeQ + sizeof(struct bhead)));
415 /* Point to head of original buffer */
416 bf = BFH(orig_buf - sizeof(struct bhead));
417 orig_size = -bf->bh.bsize; /* negative since it's an allocated buffer */
419 /* Point to head of the becoming new allocated buffer */
420 b = BH(new_buf - sizeof(struct bhead));
422 if (bf->bh.prevfree != 0) {
423 /* Previous buffer is free, consolidate with that buffer */
426 /* Update the previous free buffer */
427 bfp = BFH((char *)bf - bf->bh.prevfree);
428 assert(bfp->bh.bsize == bf->bh.prevfree);
/* Grow the preceding free block to swallow the released head */
429 bfp->bh.bsize += size;
431 /* Make a new allocated buffer header */
432 b->prevfree = bfp->bh.bsize;
433 /* Make it negative since it's an allocated buffer */
434 b->bsize = -(orig_size - size);
437 * Previous buffer is allocated, create a new buffer and
438 * insert on the free list.
441 /* Make it negative since it's an allocated buffer */
442 b->bsize = -(orig_size - size);
444 create_free_block(bf, size, b);
/* BufStats bookkeeping: released bytes come off totalloc */
449 assert(totalloc >= 0);
/*
 * Shrink an allocated buffer to "size" payload bytes, releasing the
 * tail back to the pool. Used by raw_memalign() after the head has been
 * trimmed. The freed tail is merged with a following free block when
 * there is one, otherwise a new free block is created.
 */
453 static void brel_after(char *buf, bufsize size)
455 struct bhead *b = BH(buf - sizeof(struct bhead));
457 bufsize new_size = size;
460 /* Select the size in the same way as in bget() */
461 if (new_size < SizeQ)
/* Round the payload up to the allocation quantum (power of two) */
465 new_size = (new_size + (SizeQuant - 1)) & (~(SizeQuant - 1));
468 new_size += sizeof(struct bhead);
469 assert(new_size <= -b->bsize);
472 * Check if there's enough space at the end of the buffer to be
473 * able to free anything.
475 free_size = -b->bsize - new_size;
476 if (free_size < SizeQ + sizeof(struct bhead))
/* bn = buffer immediately after the original allocation */
479 bn = BH((char *)b - b->bsize);
481 * Set the new size of the buffer;
483 b->bsize = -new_size;
485 /* Next buffer is free, consolidate with that buffer */
486 struct bfhead *bfn = BFH(bn);
487 struct bfhead *nbf = BFH((char *)b + new_size);
488 struct bhead *bnn = BH((char *)bn + bn->bsize);
490 assert(bfn->bh.prevfree == 0);
491 assert(bnn->prevfree == bfn->bh.bsize);
493 /* Construct the new free header */
494 nbf->bh.prevfree = 0;
495 nbf->bh.bsize = bfn->bh.bsize + free_size;
497 /* Update the buffer after this to point to this header */
498 bnn->prevfree += free_size;
501 * Unlink the previous free buffer and link the new free
504 assert(bfn->ql.blink->ql.flink == bfn);
505 assert(bfn->ql.flink->ql.blink == bfn);
507 /* Assign blink and flink from old free buffer */
508 nbf->ql.blink = bfn->ql.blink;
509 nbf->ql.flink = bfn->ql.flink;
511 /* Replace the old free buffer with the new one */
512 nbf->ql.blink->ql.flink = nbf;
513 nbf->ql.flink->ql.blink = nbf;
515 /* New buffer is allocated, create a new free buffer */
516 create_free_block(BFH((char *)b + new_size), free_size, bn);
/* BufStats bookkeeping: released bytes come off totalloc */
520 totalloc -= free_size;
521 assert(totalloc >= 0);
/*
 * Aligned allocation: over-allocate with bget, then trim the head
 * (brel_before) and tail (brel_after) so that the returned pointer plus
 * hdr_size is aligned to "alignment" (must be a power of two). Falls
 * back to raw_malloc() when SizeQuant alignment already suffices.
 * Caller must hold the heap lock.
 */
526 static void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
532 raw_malloc_validate_pools();
534 if (!IS_POWER_OF_TWO(alignment))
538 * Normal malloc with headers always returns something SizeQuant
541 if (alignment <= SizeQuant)
542 return raw_malloc(hdr_size, ftr_size, size);
/* Worst-case over-allocation so an aligned window always fits */
544 s = hdr_size + ftr_size + alignment + size +
545 SizeQ + sizeof(struct bhead);
/* Wrap check on the sum above */
548 if (s < alignment || s < size)
551 b = (uintptr_t)bget(s);
555 if ((b + hdr_size) & (alignment - 1)) {
557 * Returned buffer is not aligned as requested if the
558 * hdr_size is added. Find an offset into the buffer
559 * that is far enough in to the buffer to be able to free
565 * Find the point where the buffer including supplied
566 * header size should start.
568 p = b + hdr_size + alignment;
569 p &= ~(alignment - 1);
/* Ensure the trimmed head is big enough to hold a free block */
571 if ((p - b) < (SizeQ + sizeof(struct bhead)))
573 assert((p + hdr_size + ftr_size + size) <= (b + s));
575 /* Free the front part of the buffer */
576 brel_before((void *)b, (void *)p);
578 /* Set the new start of the buffer */
583 * Since b is now aligned, release what we don't need at the end of
586 brel_after((void *)b, hdr_size + ftr_size + size);
588 raw_malloc_return_hook((void *)b, size);
593 /* Most of the stuff in this function is copied from bgetr() in bget.c */
/*
 * Return the usable payload size of a bget-allocated buffer, handling
 * both pool buffers (struct bhead) and buffers acquired directly
 * through the acquisition function (struct bdhead).
 */
594 static __maybe_unused bufsize bget_buf_size(void *buf)
596 bufsize osize; /* Old size of buffer */
599 b = BH(((char *)buf) - sizeof(struct bhead));
603 /* Buffer acquired directly through acqfcn. */
606 bd = BDH(((char *)buf) - sizeof(struct bdhead));
607 osize = bd->tsize - sizeof(struct bdhead);
/* Pool buffer: payload is block size minus the plain header */
610 osize -= sizeof(struct bhead);
/*
 * Debug-allocation (mdbg) support: each allocation is wrapped in a
 * header (magic, size, file:line) and a trailing 32-bit footer magic so
 * over/underruns can be detected in assert_header()/mdbg_check().
 */
627 #define MDBG_HEADER_MAGIC 0xadadadad
628 #define MDBG_FOOTER_MAGIC 0xecececec
/* Footer size = padding to the next uint32_t boundary + the magic word */
630 static size_t mdbg_get_ftr_size(size_t pl_size)
632 size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size;
634 return ftr_pad + sizeof(uint32_t);
/*
 * Locate the footer word behind a header's payload.
 * NOTE(review): hdr->pl_size + mdbg_get_ftr_size() overshoots by
 * sizeof(uint32_t) unless the elided lines subtract it — the footer
 * address arithmetic here looks off by one word; TODO confirm against
 * the full source.
 */
637 static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
641 footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
642 mdbg_get_ftr_size(hdr->pl_size));
/* Fill in the debug header and stamp the footer magic */
647 static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
648 int lineno, size_t pl_size)
654 hdr->pl_size = pl_size;
655 hdr->magic = MDBG_HEADER_MAGIC;
657 footer = mdbg_get_footer(hdr);
658 *footer = MDBG_FOOTER_MAGIC;
/*
 * malloc() with debug bookkeeping: allocates room for the mdbg header
 * and footer around the payload and records the calling file:line.
 */
661 void *mdbg_malloc(const char *fname, int lineno, size_t size)
663 struct mdbg_hdr *hdr;
664 uint32_t exceptions = malloc_lock();
667 * Check struct mdbg_hdr doesn't get bad alignment.
668 * This is required by C standard: the buffer returned from
669 * malloc() should be aligned with a fundamental alignment.
670 * For ARM32, the required alignment is 8. For ARM64, it is 16.
673 (sizeof(struct mdbg_hdr) % (__alignof(uintptr_t) * 2)) == 0);
675 hdr = raw_malloc(sizeof(struct mdbg_hdr),
676 mdbg_get_ftr_size(size), size);
678 mdbg_update_hdr(hdr, fname, lineno, size);
682 malloc_unlock(exceptions);
/* Panic/assert if the buffer's guard magics have been clobbered */
686 static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
688 assert(hdr->magic == MDBG_HEADER_MAGIC);
689 assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
/*
 * Free a debug allocation: clear the guard words first so a stale
 * pointer reused after free fails the magic checks.
 */
692 static void mdbg_free(void *ptr)
694 struct mdbg_hdr *hdr = ptr;
700 *mdbg_get_footer(hdr) = 0;
/* Public free(): take the lock, then delegate (body elided here) */
707 uint32_t exceptions = malloc_lock();
710 malloc_unlock(exceptions);
/*
 * calloc() with debug bookkeeping.
 * NOTE(review): nmemb * size is computed here (twice) before
 * raw_calloc() gets a chance to check for multiplication overflow, so
 * the footer size and recorded payload size can be based on a wrapped
 * product — consider checking the product before use.
 */
713 void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
715 struct mdbg_hdr *hdr;
716 uint32_t exceptions = malloc_lock();
718 hdr = raw_calloc(sizeof(struct mdbg_hdr),
719 mdbg_get_ftr_size(nmemb * size), nmemb, size);
721 mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
724 malloc_unlock(exceptions);
/*
 * realloc() core, called with the heap lock already held: resizes the
 * whole header+payload+footer envelope and re-stamps the debug header.
 */
728 static void *mdbg_realloc_unlocked(const char *fname, int lineno,
729 void *ptr, size_t size)
731 struct mdbg_hdr *hdr = ptr;
737 hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
738 mdbg_get_ftr_size(size), size);
740 mdbg_update_hdr(hdr, fname, lineno, size);
/* Locked public wrapper around mdbg_realloc_unlocked() */
746 void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
749 uint32_t exceptions = malloc_lock();
751 p = mdbg_realloc_unlocked(fname, lineno, ptr, size);
752 malloc_unlock(exceptions);
/* Internal callers (e.g. malloc_add_pool) reuse the unlocked variant */
756 #define realloc_unlocked(ptr, size) \
757 mdbg_realloc_unlocked(__FILE__, __LINE__, (ptr), (size))
/* memalign() with debug bookkeeping */
759 void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
762 struct mdbg_hdr *hdr;
763 uint32_t exceptions = malloc_lock();
765 hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
768 mdbg_update_hdr(hdr, fname, lineno, size);
771 malloc_unlock(exceptions);
/*
 * Map a raw bget buffer to its user payload and size by skipping the
 * mdbg header (debug-build variant of this helper).
 */
776 static void *get_payload_start_size(void *raw_buf, size_t *size)
778 struct mdbg_hdr *hdr = raw_buf;
780 assert(bget_buf_size(hdr) >= hdr->pl_size);
781 *size = hdr->pl_size;
/*
 * Walk every allocated buffer in every pool, verify its guard magics,
 * and (when bufdump is non-zero, presumably) print each allocation's
 * size and origin file:line — TODO confirm the bufdump condition, the
 * branch is elided in this view.
 */
785 void mdbg_check(int bufdump)
787 struct bpool_iterator itr;
789 uint32_t exceptions = malloc_lock();
791 raw_malloc_validate_pools();
793 BPOOL_FOREACH(&itr, &b) {
794 struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;
799 const char *fname = hdr->fname;
804 IMSG("buffer: %d bytes %s:%d\n",
805 hdr->pl_size, fname, hdr->line);
809 malloc_unlock(exceptions);
/*
 * Non-debug public allocator entry points: each takes the heap lock,
 * delegates to the raw_* implementation with zero header/footer sizes,
 * and unlocks. (These are compiled instead of the mdbg_* wrappers.)
 */
814 void *malloc(size_t size)
817 uint32_t exceptions = malloc_lock();
819 p = raw_malloc(0, 0, size);
820 malloc_unlock(exceptions);
/* free() — signature line elided in this view */
826 uint32_t exceptions = malloc_lock();
829 malloc_unlock(exceptions);
832 void *calloc(size_t nmemb, size_t size)
835 uint32_t exceptions = malloc_lock();
837 p = raw_calloc(0, 0, nmemb, size);
838 malloc_unlock(exceptions);
/* Lock-free realloc used internally while the heap lock is held */
842 static void *realloc_unlocked(void *ptr, size_t size)
844 return raw_realloc(ptr, 0, 0, size);
847 void *realloc(void *ptr, size_t size)
850 uint32_t exceptions = malloc_lock();
852 p = realloc_unlocked(ptr, size);
853 malloc_unlock(exceptions);
857 void *memalign(size_t alignment, size_t size)
860 uint32_t exceptions = malloc_lock();
862 p = raw_memalign(0, 0, alignment, size);
863 malloc_unlock(exceptions);
/* Non-debug variant: the whole bget payload is user data */
867 static void *get_payload_start_size(void *ptr, size_t *size)
869 *size = bget_buf_size(ptr);
/*
 * Register [buf, buf + len) as an additional heap pool: align the range
 * to SizeQuant, hand it to bget via bpool(), and append a record to the
 * malloc_pool[] bookkeeping array (grown with realloc_unlocked while
 * the heap lock is held). Too-small ranges are skipped with a warning.
 */
875 void malloc_add_pool(void *buf, size_t len)
880 uintptr_t start = (uintptr_t)buf;
881 uintptr_t end = start + len;
/* Smallest range that still fits the bookkeeping plus two bget headers */
882 const size_t min_len = ((sizeof(struct malloc_pool) + (SizeQuant - 1)) &
883 (~(SizeQuant - 1))) +
884 sizeof(struct bhead) * 2;
887 start = ROUNDUP(start, SizeQuant);
888 end = ROUNDDOWN(end, SizeQuant);
891 if ((end - start) < min_len) {
892 DMSG("Skipping too small pool");
896 exceptions = malloc_lock();
/* New pool memory starts out "freed" as far as ASan is concerned */
897 tag_asan_free((void *)start, end - start);
898 bpool((void *)start, end - start);
899 l = malloc_pool_len + 1;
900 p = realloc_unlocked(malloc_pool, sizeof(struct malloc_pool) * l);
903 malloc_pool[malloc_pool_len].buf = (void *)start;
904 malloc_pool[malloc_pool_len].len = end - start;
/* BufStats: total heap size grows by the usable pool length */
906 mstats.size += malloc_pool[malloc_pool_len].len;
909 malloc_unlock(exceptions);
/*
 * Return true when [buf, buf + len) lies entirely inside a single
 * currently-allocated heap buffer. Used to vet buffers handed across
 * trust boundaries before the allocator dereferences them.
 */
912 bool malloc_buffer_is_within_alloced(void *buf, size_t len)
914 struct bpool_iterator itr;
916 uint8_t *start_buf = buf;
917 uint8_t *end_buf = start_buf + len;
919 uint32_t exceptions = malloc_lock();
921 raw_malloc_validate_pools();
923 /* Check for wrapping */
924 if (start_buf > end_buf)
/* Scan every allocated payload for one that fully contains the range */
927 BPOOL_FOREACH(&itr, &b) {
932 start_b = get_payload_start_size(b, &s);
935 if (start_buf >= start_b && end_buf <= end_b) {
942 malloc_unlock(exceptions);
947 bool malloc_buffer_overlaps_heap(void *buf, size_t len)
949 uintptr_t buf_start = (uintptr_t) buf;
950 uintptr_t buf_end = buf_start + len;
953 uint32_t exceptions = malloc_lock();
955 raw_malloc_validate_pools();
957 for (n = 0; n < malloc_pool_len; n++) {
958 uintptr_t pool_start = (uintptr_t)malloc_pool[n].buf;
959 uintptr_t pool_end = pool_start + malloc_pool[n].len;
961 if (buf_start > buf_end || pool_start > pool_end) {
962 ret = true; /* Wrapping buffers, shouldn't happen */
966 if (buf_end > pool_start || buf_start < pool_end) {
973 malloc_unlock(exceptions);