From a771a91dcbe637bc545da510fe5a9a671ab5e977 Mon Sep 17 00:00:00 2001
From: Simon Tatham <simon.tatham@arm.com>
Date: Fri, 19 Aug 2022 15:07:55 +0100
Subject: [PATCH] [libcxxabi] Fix alignment of pointers returned by
 fallback_malloc

This aligns the ``heap[]`` array in ``fallback_malloc.cpp`` to ensure
that it can be safely cast to ``heap_node*``, and also adjusts the
allocation algorithm to ensure that every allocated block has the
alignment requested by ``__attribute__((aligned))``, by putting the
block's ``heap_node`` header 4 bytes before an aligned address.

Patch originally by Eric Fiselier: this is an updated version of
D12669, which was never landed.

Reviewed By: ldionne, #libc_abi

Differential Revision: https://reviews.llvm.org/D129842
---
 libcxxabi/src/fallback_malloc.cpp            | 70 ++++++++++++++++++++++----
 libcxxabi/test/test_fallback_malloc.pass.cpp | 16 +++++--
 2 files changed, 73 insertions(+), 13 deletions(-)

diff --git a/libcxxabi/src/fallback_malloc.cpp b/libcxxabi/src/fallback_malloc.cpp
index 1d6c380..591efbe 100644
--- a/libcxxabi/src/fallback_malloc.cpp
+++ b/libcxxabi/src/fallback_malloc.cpp
@@ -15,6 +15,7 @@
 #endif
 #endif
 
+#include <assert.h>
 #include <stdlib.h> // for malloc, calloc, free
 #include <string.h> // for memset
 #include <new> // for std::__libcpp_aligned_{alloc,free}
@@ -63,11 +64,28 @@ char heap[HEAP_SIZE] __attribute__((aligned));
 typedef unsigned short heap_offset;
 typedef unsigned short heap_size;
 
+// On both 64 and 32 bit targets heap_node should have the following properties
+// Size: 4
+// Alignment: 2
 struct heap_node {
   heap_offset next_node; // offset into heap
   heap_size len;         // size in units of "sizeof(heap_node)"
 };
 
+// All pointers returned by fallback_malloc must be at least aligned
+// as RequiredAligned. Note that RequiredAlignment can be greater than
+// alignof(std::max_align_t) on 64 bit systems compiling 32 bit code.
+struct FallbackMaxAlignType {
+} __attribute__((aligned));
+const size_t RequiredAlignment = alignof(FallbackMaxAlignType);
+
+static_assert(alignof(FallbackMaxAlignType) % sizeof(heap_node) == 0,
+              "The required alignment must be evenly divisible by the sizeof(heap_node)");
+
+// The number of heap_node's that can fit in a chunk of memory with the size
+// of the RequiredAlignment. On 64 bit targets NodesPerAlignment should be 4.
+const size_t NodesPerAlignment = alignof(FallbackMaxAlignType) / sizeof(heap_node);
+
 static const heap_node* list_end =
     (heap_node*)(&heap[HEAP_SIZE]); // one past the end of the heap
 static heap_node* freelist = NULL;
@@ -82,10 +100,23 @@ heap_offset offset_from_node(const heap_node* ptr) {
                              sizeof(heap_node));
 }
 
+// Return a pointer to the first address, 'A', in `heap` that can actually be
+// used to represent a heap_node. 'A' must be aligned so that
+// '(A + sizeof(heap_node)) % RequiredAlignment == 0'. On 64 bit systems this
+// address should be 12 bytes after the first 16 byte boundary.
+heap_node* getFirstAlignedNodeInHeap() {
+  heap_node* node = (heap_node*)heap;
+  const size_t alignNBytesAfterBoundary = RequiredAlignment - sizeof(heap_node);
+  size_t boundaryOffset = reinterpret_cast<size_t>(node) % RequiredAlignment;
+  size_t requiredOffset = alignNBytesAfterBoundary - boundaryOffset;
+  size_t NElemOffset = requiredOffset / sizeof(heap_node);
+  return node + NElemOffset;
+}
+
 void init_heap() {
-  freelist = (heap_node*)heap;
+  freelist = getFirstAlignedNodeInHeap();
   freelist->next_node = offset_from_node(list_end);
-  freelist->len = HEAP_SIZE / sizeof(heap_node);
+  freelist->len = static_cast<heap_size>(list_end - freelist);
 }
 
 // How big a chunk we allocate
@@ -109,23 +140,44 @@ void* fallback_malloc(size_t len) {
   for (p = freelist, prev = 0; p && p != list_end;
        prev = p, p = node_from_offset(p->next_node)) {
-    if (p->len > nelems) { // chunk is larger, shorten, and return the tail
-      heap_node* q;
+    // Check the invariant that all heap_nodes pointers 'p' are aligned
+    // so that 'p + 1' has an alignment of at least RequiredAlignment
+    assert(reinterpret_cast<size_t>(p + 1) % RequiredAlignment == 0);
+
+    // Calculate the number of extra padding elements needed in order
+    // to split 'p' and create a properly aligned heap_node from the tail
+    // of 'p'. We calculate aligned_nelems such that 'p->len - aligned_nelems'
+    // will be a multiple of NodesPerAlignment.
+    size_t aligned_nelems = nelems;
+    if (p->len > nelems) {
+      heap_size remaining_len = static_cast<heap_size>(p->len - nelems);
+      aligned_nelems += remaining_len % NodesPerAlignment;
+    }
 
-      p->len = static_cast<heap_size>(p->len - nelems);
+    // chunk is larger and we can create a properly aligned heap_node
+    // from the tail. In this case we shorten 'p' and return the tail.
+    if (p->len > aligned_nelems) {
+      heap_node* q;
+      p->len = static_cast<heap_size>(p->len - aligned_nelems);
       q = p + p->len;
       q->next_node = 0;
-      q->len = static_cast<heap_size>(nelems);
-      return (void*)(q + 1);
+      q->len = static_cast<heap_size>(aligned_nelems);
+      void* ptr = q + 1;
+      assert(reinterpret_cast<size_t>(ptr) % RequiredAlignment == 0);
+      return ptr;
     }
 
-    if (p->len == nelems) { // exact size match
+    // The chunk is the exact size or the chunk is larger but not large
+    // enough to split due to alignment constraints.
+    if (p->len >= nelems) {
       if (prev == 0)
         freelist = node_from_offset(p->next_node);
       else
         prev->next_node = p->next_node;
       p->next_node = 0;
-      return (void*)(p + 1);
+      void* ptr = p + 1;
+      assert(reinterpret_cast<size_t>(ptr) % RequiredAlignment == 0);
+      return ptr;
     }
   }
   return NULL; // couldn't find a spot big enough
diff --git a/libcxxabi/test/test_fallback_malloc.pass.cpp b/libcxxabi/test/test_fallback_malloc.pass.cpp
index 5f429ef..d7decd9 100644
--- a/libcxxabi/test/test_fallback_malloc.pass.cpp
+++ b/libcxxabi/test/test_fallback_malloc.pass.cpp
@@ -8,9 +8,11 @@
 
 #include <cstddef>
 #include <deque>
+#include <cassert>
 
 #include <__threading_support>
 
+// UNSUPPORTED: c++03
 // UNSUPPORTED: modules-build && no-threads
 
 // Necessary because we include a private source file of libc++abi, which
@@ -26,12 +28,16 @@ typedef std::deque<void *> container;
 #define INSTRUMENT_FALLBACK_MALLOC
 #include "../src/fallback_malloc.cpp"
 
+void assertAlignment(void* ptr) { assert(reinterpret_cast<size_t>(ptr) % alignof(FallbackMaxAlignType) == 0); }
+
 container alloc_series ( size_t sz ) {
     container ptrs;
     void *p;
 
-    while ( NULL != ( p = fallback_malloc ( sz )))
-        ptrs.push_back ( p );
+    while (NULL != (p = fallback_malloc(sz))) {
+        assertAlignment(p);
+        ptrs.push_back(p);
+    }
     return ptrs;
 }
 
@@ -40,8 +46,9 @@ container alloc_series ( size_t sz, float growth ) {
     void *p;
 
     while ( NULL != ( p = fallback_malloc ( sz ))) {
-        ptrs.push_back ( p );
-        sz *= growth;
+        assertAlignment(p);
+        ptrs.push_back(p);
+        sz *= growth;
     }
 
     return ptrs;
@@ -55,6 +62,7 @@ container alloc_series ( const size_t *first, size_t len ) {
     for ( const size_t *iter = first; iter != last; ++iter ) {
         if ( NULL == (p = fallback_malloc ( *iter )))
             break;
+        assertAlignment(p);
         ptrs.push_back ( p );
     }
-- 
2.7.4
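
To see the header-placement arithmetic of getFirstAlignedNodeInHeap() in
isolation, here is a minimal standalone sketch (not part of the patch). It
assumes a hypothetical 4-byte Header, a 16-byte kRequiredAlignment, and a
pool[] array standing in for heap[], matching the 64-bit values the patch's
comments describe; the real code derives these from heap_node and
FallbackMaxAlignType instead.

#include <cassert>
#include <cstddef>
#include <cstdint>

struct Header { unsigned short next, len; }; // 4 bytes, like heap_node
const std::size_t kRequiredAlignment = 16;   // stand-in for RequiredAlignment

alignas(kRequiredAlignment) char pool[512];  // like heap[], max-aligned

// Place the first header so that (header + 1) lands exactly on a
// kRequiredAlignment boundary, i.e. 12 bytes past the start of pool[].
Header* firstAlignedHeader() {
  std::size_t bytesBeforeBoundary = kRequiredAlignment - sizeof(Header); // 12
  std::size_t misalignment =
      reinterpret_cast<std::uintptr_t>(pool) % kRequiredAlignment; // 0: pool is aligned
  return reinterpret_cast<Header*>(pool + (bytesBeforeBoundary - misalignment));
}

int main() {
  Header* h = firstAlignedHeader();
  void* payload = h + 1; // what fallback_malloc would hand out
  assert(reinterpret_cast<std::uintptr_t>(payload) % kRequiredAlignment == 0);
  return 0;
}

Because the payload begins sizeof(Header) bytes after the header, parking the
header just before an aligned boundary is what makes the returned pointer
itself aligned.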
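Similarly, a sketch of the padding computation behind aligned_nelems in the
split path of fallback_malloc(). The function name alignedNelems and the
value 4 for kNodesPerAlignment are assumptions for illustration (16-byte
RequiredAlignment over a 4-byte heap_node, the 64-bit case); in the patch the
same arithmetic is inlined in the free-list loop.

#include <cassert>
#include <cstddef>

// RequiredAlignment / sizeof(heap_node) on typical 64 bit targets: 16 / 4.
const std::size_t kNodesPerAlignment = 4;

// Pad a request of 'nelems' nodes carved from a free chunk of 'chunk_len'
// nodes so that 'chunk_len - padded' stays a multiple of kNodesPerAlignment.
// Stepping from an aligned node by a multiple of kNodesPerAlignment lands on
// another aligned node, so the tail block handed out keeps the invariant.
std::size_t alignedNelems(std::size_t chunk_len, std::size_t nelems) {
  std::size_t padded = nelems;
  if (chunk_len > nelems)
    padded += (chunk_len - nelems) % kNodesPerAlignment;
  return padded;
}

int main() {
  // 10-node chunk, 3-node request: pad to 6 so the head keeps 4 nodes,
  // a multiple of kNodesPerAlignment.
  assert(alignedNelems(10, 3) == 6);
  // 11-node chunk, 3-node request: the remaining 8 is already a multiple
  // of 4, so no padding is needed.
  assert(alignedNelems(11, 3) == 3);
  return 0;
}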