2 * Copyright (c) 2014, STMicroelectronics International N.V.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
34 /* #define BGET_ENABLE_ALL_OPTIONS */
35 #ifdef BGET_ENABLE_OPTION
36 #define TestProg 20000 /* Generate built-in test program
37 if defined. The value specifies
38 how many buffer allocation attempts
39 the test program should make. */
49 /* Buffer allocation size quantum:
50 all buffers allocated are a
51 multiple of this size. This
52 MUST be a power of two. */
54 #ifdef BGET_ENABLE_OPTION
55 #define BufDump 1 /* Define this symbol to enable the
56 bpoold() function which dumps the
57 buffers in a buffer pool. */
59 #define BufValid 1 /* Define this symbol to enable the
60 bpoolv() function for validating
63 #define DumpData 1 /* Define this symbol to enable the
64 bufdump() function which allows
65 dumping the contents of an allocated
68 #define BufStats 1 /* Define this symbol to enable the
69 bstats() function which calculates
70 the total free space in the buffer
71 pool, the largest available
72 buffer, and the total space
73 currently allocated. */
75 #define FreeWipe 1 /* Wipe free buffers to a guaranteed
76 pattern of garbage to trip up
77 miscreants who attempt to use
78 pointers into released buffers. */
80 #define BestFit 1 /* Use a best fit algorithm when
81 searching for space for an
82 allocation request. This uses
83 memory more efficiently, but
84 allocation will be much slower. */
86 #define BECtl 1 /* Define this symbol to enable the
87 bectl() function for automatic
88 pool space control. */
102 #include <compiler.h>
110 #if defined(__KERNEL__)
111 /* Compiling for TEE Core */
112 #include <kernel/asan.h>
113 #include <kernel/thread.h>
114 #include <kernel/spinlock.h>
/*
 * Serialize heap access in TEE core: mask both native and foreign
 * interrupts, then take the global malloc spinlock.  Returns the previous
 * exception mask so malloc_unlock() can restore it.
 * (Local declaration of 'exceptions' and the return are elided in this view.)
 */
static uint32_t malloc_lock(void)
	exceptions = thread_mask_exceptions(
			THREAD_EXCP_NATIVE_INTR | THREAD_EXCP_FOREIGN_INTR);
	cpu_spin_lock(&__malloc_spinlock);
/*
 * Release the heap lock taken by malloc_lock() and restore the caller's
 * exception mask — strictly the reverse order of malloc_lock().
 */
static void malloc_unlock(uint32_t exceptions)
	cpu_spin_unlock(&__malloc_spinlock);
	thread_unmask_exceptions(exceptions);
/* Mark [buf, buf + len) as freed for kernel ASan so stale accesses trap. */
static void tag_asan_free(void *buf, size_t len)
	asan_tag_heap_free(buf, (uint8_t *)buf + len);
/* Mark [buf, buf + len) as valid (allocated) for kernel ASan. */
static void tag_asan_alloced(void *buf, size_t len)
	asan_tag_access(buf, (uint8_t *)buf + len);
/* Compiling for TA */
/*
 * TA builds are single-threaded from the heap's point of view and have no
 * kernel ASan, so locking and tagging degrade to no-ops.  The stub bodies
 * are elided in this view; each presumably just returns.
 */
static uint32_t malloc_lock(void)
static void malloc_unlock(uint32_t exceptions __unused)
static void tag_asan_free(void *buf __unused, size_t len __unused)
static void tag_asan_alloced(void *buf __unused, size_t len __unused)
#endif /*__KERNEL__*/
162 #include "bget.c" /* this is ugly, but this is bget */
169 static struct malloc_pool *malloc_pool;
170 static size_t malloc_pool_len;
174 static struct malloc_stats mstats;
/*
 * Bookkeeping hook run on every raw_*() allocation return (BufStats build):
 * tracks the high-water mark of allocated bytes and records failed
 * allocations together with the heap usage at the moment of failure.
 * The branch on 'p' selecting the failure path is elided in this view.
 */
static void raw_malloc_return_hook(void *p, size_t requested_size)
	if (totalloc > mstats.max_allocated)
		mstats.max_allocated = totalloc;
		mstats.num_alloc_fail++;
		if (requested_size > mstats.biggest_alloc_fail) {
			mstats.biggest_alloc_fail = requested_size;
			mstats.biggest_alloc_fail_used = totalloc;
190 void malloc_reset_stats(void)
192 unsigned int exceptions = malloc_lock();
194 mstats.max_allocated = 0;
195 mstats.num_alloc_fail = 0;
196 mstats.biggest_alloc_fail = 0;
197 mstats.biggest_alloc_fail_used = 0;
198 malloc_unlock(exceptions);
201 void malloc_get_stats(struct malloc_stats *stats)
203 uint32_t exceptions = malloc_lock();
205 memcpy(stats, &mstats, sizeof(*stats));
206 stats->allocated = totalloc;
207 malloc_unlock(exceptions);
/* BufStats disabled: the allocation-return hook compiles to a no-op. */
static void raw_malloc_return_hook(void *p __unused, size_t requested_size __unused)
216 #endif /* BufStats */
/*
 * BufValid build: run BGET's bpoolv() consistency check over every
 * registered pool.  Asserts on heap corruption.
 * (The loop counter declaration is elided in this view.)
 */
static void raw_malloc_validate_pools(void)
	for (n = 0; n < malloc_pool_len; n++)
		bpoolv(malloc_pool[n].buf);
/* BufValid disabled: pool validation is a no-op. */
static void raw_malloc_validate_pools(void)
/*
 * Cursor used to walk every buffer (free and allocated) in every pool.
 * Besides 'next_buf' it also carries a pool index ('pool_idx', used by
 * the functions below) whose declaration is elided in this view.
 */
struct bpool_iterator {
	struct bfhead *next_buf;
/* Start iteration at the first buffer header of the first pool. */
static void bpool_foreach_iterator_init(struct bpool_iterator *iterator)
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(malloc_pool[0].buf);
/*
 * Advance the iterator one buffer within the current pool.  On success
 * reports the payload pointer, payload length and whether the buffer is
 * free; returns false at the pool's end sentinel.  In BGET a negative
 * bh.bsize marks an allocated buffer, positive marks a free one — the
 * branch distinguishing the two is elided in this view.
 */
static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
		size_t *len, bool *isfree)
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;
		/* Allocated buffer */
		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	/* Payload sits immediately after the plain allocation header. */
	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);
	/* Step to the next buffer: headers are bs bytes apart. */
	iterator->next_buf = BFH((uint8_t *)b + bs);
/*
 * Advance the iterator to the next *allocated* buffer across all pools:
 * exhaust the current pool via bpool_foreach_pool(), then move to the
 * next registered pool.  Returns false when every pool has been walked.
 * (The free-buffer filtering/looping around these lines is elided.)
 */
static bool bpool_foreach(struct bpool_iterator *iterator, void **buf)
	if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
		/* Current pool exhausted and no pool left: iteration done. */
		if ((iterator->pool_idx + 1) >= malloc_pool_len)
		iterator->pool_idx++;
		iterator->next_buf = BFH(malloc_pool[iterator->pool_idx].buf);
293 /* Convenience macro for looping over all allocated buffers */
294 #define BPOOL_FOREACH(iterator, bp) \
295 for (bpool_foreach_iterator_init((iterator)); \
296 bpool_foreach((iterator), (bp));)
/*
 * Core allocation: reserve hdr_size + pl_size + ftr_size bytes via BGET.
 * hdr/ftr space lets the debug wrappers (mdbg_*) prepend/append metadata
 * while sharing this path with the plain malloc() wrappers (0, 0).
 * The actual bget() call, overflow/zero-size handling and locking are in
 * lines elided from this view; callers hold the heap lock.
 */
static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size)
	size_t s = hdr_size + ftr_size + pl_size;
	/*
	 * Make sure that malloc has correct alignment of returned buffers.
	 * The assumption is that uintptr_t will be as wide as the largest
	 * required alignment of any type.
	 */
	COMPILE_TIME_ASSERT(SizeQuant >= sizeof(uintptr_t));
	raw_malloc_validate_pools();
	/* BGET doesn't like 0 sized allocations */
	raw_malloc_return_hook(ptr, pl_size);
/*
 * Core free: validate the pools, then hand the buffer back to BGET
 * (the brel() call is elided in this view).  Caller holds the heap lock.
 */
static void raw_free(void *ptr)
	raw_malloc_validate_pools();
/*
 * Core calloc: allocate hdr + nmemb * size + ftr bytes and (in elided
 * lines) zero the payload.  Caller holds the heap lock.
 *
 * NOTE(review): the visible overflow guard 's < pl_nmemb || s < pl_size'
 * does not catch every pl_nmemb * pl_size wrap-around (e.g. products that
 * wrap to a value still >= both operands) — verify against an explicit
 * division- or MUL_OVERFLOW-style check.
 */
static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
	size_t s = hdr_size + ftr_size + pl_nmemb * pl_size;
	raw_malloc_validate_pools();
	if (s < pl_nmemb || s < pl_size)
	/* BGET doesn't like 0 sized allocations */
	raw_malloc_return_hook(ptr, pl_nmemb * pl_size);
/*
 * Core realloc: resize 'ptr' to hdr + pl_size + ftr bytes via BGET's
 * bgetr() (call elided in this view).  Caller holds the heap lock.
 */
static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
	size_t s = hdr_size + ftr_size + pl_size;
	raw_malloc_validate_pools();
	/* BGET doesn't like 0 sized allocations */
	raw_malloc_return_hook(p, pl_size);
/*
 * Turn the 'size' bytes at 'bf' into a free BGET block ending exactly at
 * the allocated block 'bn', and insert it at the tail of the free list.
 * Preconditions are asserted: 'bn' must be allocated (negative bsize) and
 * must not already have a free block in front of it.
 */
static void create_free_block(struct bfhead *bf, bufsize size, struct bhead *bn)
	assert(BH((char *)bf + size) == bn);
	assert(bn->bsize < 0); /* Next block should be allocated */
	/* Next block shouldn't already have free block in front */
	assert(bn->prevfree == 0);
	/* Create the free buf header */
	/* Update next block to point to the new free buf header */
	/* Insert the free buffer on the free list */
	assert(freelist.ql.blink->ql.flink == &freelist);
	assert(freelist.ql.flink->ql.blink == &freelist);
	/* Doubly-linked list insert before the freelist sentinel. */
	bf->ql.flink = &freelist;
	bf->ql.blink = freelist.ql.blink;
	freelist.ql.blink = bf;
	bf->ql.blink->ql.flink = bf;
/*
 * Release the front part [orig_buf, new_buf) of an allocated BGET buffer,
 * keeping the tail starting at new_buf allocated.  Used by raw_memalign()
 * to trim the unaligned prefix.  Two cases:
 *  - the previous block is free: grow it over the released prefix;
 *  - the previous block is allocated: create a new free block in place.
 * Some header-update lines and the totalloc adjustment are elided here.
 */
static void brel_before(char *orig_buf, char *new_buf)
	assert(orig_buf < new_buf);
	/* There has to be room for the freebuf header */
	size = (bufsize)(new_buf - orig_buf);
	assert(size >= (SizeQ + sizeof(struct bhead)));
	/* Point to head of original buffer */
	bf = BFH(orig_buf - sizeof(struct bhead));
	orig_size = -bf->bh.bsize; /* negative since it's an allocated buffer */
	/* Point to head of the becoming new allocated buffer */
	b = BH(new_buf - sizeof(struct bhead));
	if (bf->bh.prevfree != 0) {
		/* Previous buffer is free, consolidate with that buffer */
		/* Update the previous free buffer */
		bfp = BFH((char *)bf - bf->bh.prevfree);
		assert(bfp->bh.bsize == bf->bh.prevfree);
		bfp->bh.bsize += size;
		/* Make a new allocated buffer header */
		b->prevfree = bfp->bh.bsize;
		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);
		 * Previous buffer is allocated, create a new buffer and
		 * insert on the free list.
		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);
		create_free_block(bf, size, b);
	assert(totalloc >= 0);
/*
 * Shrink the allocated BGET buffer at 'buf' to 'size' payload bytes and
 * release the surplus at its end.  The kept size is rounded exactly like
 * bget() would round it so the block stays well-formed.  If the following
 * block is free the surplus is merged into it (rebuilding its header and
 * free-list links); otherwise a new free block is created.  Does nothing
 * if the surplus is too small to hold a free-block header.
 */
static void brel_after(char *buf, bufsize size)
	struct bhead *b = BH(buf - sizeof(struct bhead));
	bufsize new_size = size;
	/* Select the size in the same way as in bget() */
	if (new_size < SizeQ)
	new_size = (new_size + (SizeQuant - 1)) & (~(SizeQuant - 1));
	new_size += sizeof(struct bhead);
	assert(new_size <= -b->bsize);
	/*
	 * Check if there's enough space at the end of the buffer to be
	 * able to free anything.
	 */
	free_size = -b->bsize - new_size;
	if (free_size < SizeQ + sizeof(struct bhead))
	bn = BH((char *)b - b->bsize);
	/*
	 * Set the new size of the buffer;
	 */
	b->bsize = -new_size;
		/* Next buffer is free, consolidate with that buffer */
		struct bfhead *bfn = BFH(bn);
		struct bfhead *nbf = BFH((char *)b + new_size);
		struct bhead *bnn = BH((char *)bn + bn->bsize);
		assert(bfn->bh.prevfree == 0);
		assert(bnn->prevfree == bfn->bh.bsize);
		/* Construct the new free header */
		nbf->bh.prevfree = 0;
		nbf->bh.bsize = bfn->bh.bsize + free_size;
		/* Update the buffer after this to point to this header */
		bnn->prevfree += free_size;
		/*
		 * Unlink the previous free buffer and link the new free
		 */
		assert(bfn->ql.blink->ql.flink == bfn);
		assert(bfn->ql.flink->ql.blink == bfn);
		/* Assign blink and flink from old free buffer */
		nbf->ql.blink = bfn->ql.blink;
		nbf->ql.flink = bfn->ql.flink;
		/* Replace the old free buffer with the new one */
		nbf->ql.blink->ql.flink = nbf;
		nbf->ql.flink->ql.blink = nbf;
		/* New buffer is allocated, create a new free buffer */
		create_free_block(BFH((char *)b + new_size), free_size, bn);
	totalloc -= free_size;
	assert(totalloc >= 0);
/*
 * Aligned allocation on top of bget(): over-allocate by alignment plus a
 * free-header's worth of slack, then trim the front (brel_before) and the
 * tail (brel_after) so that buf + hdr_size lands on 'alignment'.
 * alignment must be a power of two; alignments <= SizeQuant are already
 * guaranteed by raw_malloc().  Caller holds the heap lock.  The failure
 * paths (NULL from bget(), overflow) are elided from this view.
 */
static void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
	raw_malloc_validate_pools();
	if (!IS_POWER_OF_TWO(alignment))
	/*
	 * Normal malloc with headers always returns something SizeQuant
	 */
	if (alignment <= SizeQuant)
		return raw_malloc(hdr_size, ftr_size, size);
	/* Slack: worst-case misalignment + room for the trimmed-off prefix. */
	s = hdr_size + ftr_size + alignment + size +
	    SizeQ + sizeof(struct bhead);
	/* Overflow check on the padded request. */
	if (s < alignment || s < size)
	b = (uintptr_t)bget(s);
	if ((b + hdr_size) & (alignment - 1)) {
		/*
		 * Returned buffer is not aligned as requested if the
		 * hdr_size is added. Find an offset into the buffer
		 * that is far enough in to the buffer to be able to free
		 */
		/*
		 * Find the point where the buffer including supplied
		 * header size should start.
		 */
		p = b + hdr_size + alignment;
		p &= ~(alignment - 1);
		/* Prefix must be large enough to become a free block. */
		if ((p - b) < (SizeQ + sizeof(struct bhead)))
		assert((p + hdr_size + ftr_size + size) <= (b + s));
		/* Free the front part of the buffer */
		brel_before((void *)b, (void *)p);
		/* Set the new start of the buffer */
	/*
	 * Since b is now aligned, release what we don't need at the end of
	 */
	brel_after((void *)b, hdr_size + ftr_size + size);
	raw_malloc_return_hook((void *)b, size);
/* Most of the stuff in this function is copied from bgetr() in bget.c */
/*
 * Return the usable payload size of a BGET buffer, handling both pool
 * buffers (struct bhead, negative bsize means allocated) and buffers
 * obtained directly through the acquisition function (struct bdhead).
 * The negation of b->bsize into osize is elided from this view.
 */
static __maybe_unused bufsize bget_buf_size(void *buf)
	bufsize osize;          /* Old size of buffer */
	b = BH(((char *)buf) - sizeof(struct bhead));
		/* Buffer acquired directly through acqfcn. */
		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead);
	/* Pool buffer: subtract the plain header from the block size. */
	osize -= sizeof(struct bhead);
628 #define MDBG_HEADER_MAGIC 0xadadadad
629 #define MDBG_FOOTER_MAGIC 0xecececec
/*
 * Size of the debug footer region for a payload of pl_size bytes: the
 * padding needed to reach the next uint32_t boundary, plus the 32-bit
 * footer magic word itself.
 */
static size_t mdbg_get_ftr_size(size_t pl_size)
{
	size_t rem = pl_size % sizeof(uint32_t);
	size_t ftr_pad = rem ? sizeof(uint32_t) - rem : 0;

	return ftr_pad + sizeof(uint32_t);
}
/*
 * Address of the footer magic word for an allocation: payload starts at
 * hdr + 1, followed by pl_size bytes and the footer region.
 * NOTE(review): as computed this points one uint32_t past the footer
 * word; a decrement of 'footer' is presumably in the elided lines —
 * confirm before relying on this expression alone.
 */
static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
			      mdbg_get_ftr_size(hdr->pl_size));
/*
 * Fill in the debug header (allocation site, payload size, magic) and
 * write the footer magic for overflow detection.  The fname/lineno field
 * assignments are elided from this view.
 */
static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
		int lineno, size_t pl_size)
	hdr->pl_size = pl_size;
	hdr->magic = MDBG_HEADER_MAGIC;
	footer = mdbg_get_footer(hdr);
	*footer = MDBG_FOOTER_MAGIC;
/*
 * Debug malloc: allocate size bytes wrapped with an mdbg header (magic +
 * allocation site) and footer magic, under the heap lock.  Returns the
 * payload pointer (hdr + 1) — that return is elided from this view.
 */
void *mdbg_malloc(const char *fname, int lineno, size_t size)
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock();
	/*
	 * Check struct mdbg_hdr doesn't get bad alignment.
	 * This is required by C standard: the buffer returned from
	 * malloc() should be aligned with a fundamental alignment.
	 * For ARM32, the required alignment is 8. For ARM64, it is 16.
	 */
		(sizeof(struct mdbg_hdr) % (__alignof(uintptr_t) * 2)) == 0);
	hdr = raw_malloc(sizeof(struct mdbg_hdr),
			 mdbg_get_ftr_size(size), size);
	mdbg_update_hdr(hdr, fname, lineno, size);
	malloc_unlock(exceptions);
/*
 * Sanity-check an allocation's guard words: trips an assert if the header
 * magic was overwritten (underflow/corruption) or the footer magic was
 * clobbered (buffer overflow past the payload).
 */
static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
/*
 * Debug free (lock-free inner part plus locked wrapper; the wrapper's
 * signature is elided at the blank region below): verify the guard words,
 * scrub the footer so a stale pointer can't pass assert_header() again,
 * then release the underlying raw buffer.
 */
static void mdbg_free(void *ptr)
	struct mdbg_hdr *hdr = ptr;
	/* Invalidate the footer magic before releasing the memory. */
	*mdbg_get_footer(hdr) = 0;
	uint32_t exceptions = malloc_lock();
	malloc_unlock(exceptions);
/*
 * Debug calloc: zeroed allocation of nmemb * size bytes with mdbg guard
 * header/footer, under the heap lock.
 * NOTE(review): nmemb * size is computed here before raw_calloc()'s own
 * overflow check runs — verify the products passed to mdbg_get_ftr_size()
 * and mdbg_update_hdr() cannot silently wrap.
 */
void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock();
	hdr = raw_calloc(sizeof(struct mdbg_hdr),
			 mdbg_get_ftr_size(nmemb * size), nmemb, size);
	mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
	malloc_unlock(exceptions);
/*
 * Debug realloc without taking the heap lock (caller must hold it).
 * Verifies the old buffer's guard words (elided here), resizes the raw
 * buffer including header/footer space, and rewrites the guards with the
 * new allocation site.
 */
static void *mdbg_realloc_unlocked(const char *fname, int lineno,
		void *ptr, size_t size)
	struct mdbg_hdr *hdr = ptr;
	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
			  mdbg_get_ftr_size(size), size);
	mdbg_update_hdr(hdr, fname, lineno, size);
/* Debug realloc: locked wrapper around mdbg_realloc_unlocked(). */
void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
	uint32_t exceptions = malloc_lock();
	p = mdbg_realloc_unlocked(fname, lineno, ptr, size);
	malloc_unlock(exceptions);
757 #define realloc_unlocked(ptr, size) \
758 mdbg_realloc_unlocked(__FILE__, __LINE__, (ptr), (size))
/*
 * Debug memalign: aligned allocation carrying the mdbg guard header and
 * footer, under the heap lock.  Alignment applies to the payload pointer
 * (hdr + 1) since raw_memalign() aligns buf + hdr_size.
 */
void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock();
	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
	mdbg_update_hdr(hdr, fname, lineno, size);
	malloc_unlock(exceptions);
/*
 * Debug-build variant: given the raw BGET buffer (which starts with the
 * mdbg header), report the payload size recorded in the header and (in
 * the elided return) the payload start hdr + 1.
 */
static void *get_payload_start_size(void *raw_buf, size_t *size)
	struct mdbg_hdr *hdr = raw_buf;
	/* The recorded payload can never exceed the underlying buffer. */
	assert(bget_buf_size(hdr) >= hdr->pl_size);
	*size = hdr->pl_size;
/*
 * Walk every allocated buffer in every pool, assert its guard words are
 * intact, and (when bufdump is non-zero) print each buffer's size and
 * allocation site.  Whole-heap leak/corruption sweep for debugging.
 * NOTE(review): "%d" is used for hdr->pl_size — if pl_size is size_t this
 * is a format mismatch; the mdbg_hdr declaration is not visible here.
 */
void mdbg_check(int bufdump)
	struct bpool_iterator itr;
	uint32_t exceptions = malloc_lock();
	raw_malloc_validate_pools();
	BPOOL_FOREACH(&itr, &b) {
		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;
		const char *fname = hdr->fname;
		IMSG("buffer: %d bytes %s:%d\n",
		     hdr->pl_size, fname, hdr->line);
	malloc_unlock(exceptions);
/*
 * Non-debug malloc(): locked wrapper around raw_malloc() with no extra
 * header/footer space (the return of 'p' is elided from this view).
 */
void *malloc(size_t size)
	uint32_t exceptions = malloc_lock();
	p = raw_malloc(0, 0, size);
	malloc_unlock(exceptions);
	/*
	 * NOTE(review): these lines are the lock/unlock bracket of the
	 * non-debug free() whose signature and raw_free() call are elided
	 * from this view.
	 */
	uint32_t exceptions = malloc_lock();
	malloc_unlock(exceptions);
/* Non-debug calloc(): locked wrapper around raw_calloc() (no guards). */
void *calloc(size_t nmemb, size_t size)
	uint32_t exceptions = malloc_lock();
	p = raw_calloc(0, 0, nmemb, size);
	malloc_unlock(exceptions);
/*
 * Resize without taking the heap lock (caller must hold it); non-debug
 * build, so no guard header/footer space is reserved.
 */
static void *realloc_unlocked(void *ptr, size_t size)
{
	void *resized = raw_realloc(ptr, 0, 0, size);

	return resized;
}
/* Non-debug realloc(): locked wrapper around realloc_unlocked(). */
void *realloc(void *ptr, size_t size)
	uint32_t exceptions = malloc_lock();
	p = realloc_unlocked(ptr, size);
	malloc_unlock(exceptions);
/* Non-debug memalign(): locked wrapper around raw_memalign() (no guards). */
void *memalign(size_t alignment, size_t size)
	uint32_t exceptions = malloc_lock();
	p = raw_memalign(0, 0, alignment, size);
	malloc_unlock(exceptions);
/*
 * Non-debug variant: the raw buffer IS the payload; size comes straight
 * from the BGET header (the return of 'ptr' is elided from this view).
 */
static void *get_payload_start_size(void *ptr, size_t *size)
	*size = bget_buf_size(ptr);
/*
 * Register [buf, buf + len) as a heap pool: align the range to SizeQuant,
 * reject ranges too small to hold the pool bookkeeping plus BGET's two
 * sentinel headers, hand the memory to BGET via bpool(), and append an
 * entry to the malloc_pool[] registry (grown with realloc_unlocked; its
 * failure handling is elided from this view).
 */
void malloc_add_pool(void *buf, size_t len)
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	const size_t min_len = ((sizeof(struct malloc_pool) + (SizeQuant - 1)) &
				(~(SizeQuant - 1))) +
			       sizeof(struct bhead) * 2;
	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);
	if ((end - start) < min_len) {
		DMSG("Skipping too small pool");
	exceptions = malloc_lock();
	/* Fresh pool memory starts out free from ASan's point of view. */
	tag_asan_free((void *)start, end - start);
	bpool((void *)start, end - start);
	l = malloc_pool_len + 1;
	p = realloc_unlocked(malloc_pool, sizeof(struct malloc_pool) * l);
	malloc_pool[malloc_pool_len].buf = (void *)start;
	malloc_pool[malloc_pool_len].len = end - start;
	mstats.size += malloc_pool[malloc_pool_len].len;
	malloc_unlock(exceptions);
/*
 * Return true iff [buf, buf + len) lies entirely inside a single
 * currently-allocated heap buffer's payload.  Walks every allocated
 * buffer via BPOOL_FOREACH; end_b computation and the true/false result
 * plumbing are elided from this view.
 */
bool malloc_buffer_is_within_alloced(void *buf, size_t len)
	struct bpool_iterator itr;
	uint8_t *start_buf = buf;
	uint8_t *end_buf = start_buf + len;
	uint32_t exceptions = malloc_lock();
	raw_malloc_validate_pools();
	/* Check for wrapping */
	if (start_buf > end_buf)
	BPOOL_FOREACH(&itr, &b) {
		start_b = get_payload_start_size(b, &s);
		if (start_buf >= start_b && end_buf <= end_b) {
	malloc_unlock(exceptions);
948 bool malloc_buffer_overlaps_heap(void *buf, size_t len)
950 uintptr_t buf_start = (uintptr_t) buf;
951 uintptr_t buf_end = buf_start + len;
954 uint32_t exceptions = malloc_lock();
956 raw_malloc_validate_pools();
958 for (n = 0; n < malloc_pool_len; n++) {
959 uintptr_t pool_start = (uintptr_t)malloc_pool[n].buf;
960 uintptr_t pool_end = pool_start + malloc_pool[n].len;
962 if (buf_start > buf_end || pool_start > pool_end) {
963 ret = true; /* Wrapping buffers, shouldn't happen */
967 if (buf_end > pool_start || buf_start < pool_end) {
974 malloc_unlock(exceptions);