//===-- sanitizer_allocator.cc --------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"

#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"

namespace __sanitizer {

// ThreadSanitizer for Go uses libc malloc/free.
#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
#  if !SANITIZER_GO
extern "C" void *__libc_memalign(uptr alignment, uptr size);
#  endif
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
# else
#  include <stdlib.h>
# define __libc_malloc malloc
#  if !SANITIZER_GO
static void *__libc_memalign(uptr alignment, uptr size) {
  void *p;
  uptr error = posix_memalign(&p, alignment, size);
  if (error) return nullptr;
  return p;
}
#  endif
# define __libc_realloc realloc
# define __libc_free free
# endif
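
// Note: posix_memalign (used by the fallback __libc_memalign above) requires
// the alignment to be a power of two and a multiple of sizeof(void *), and it
// reports failure through its return value rather than errno, hence the
// explicit error check.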

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  (void)cache;
#if !SANITIZER_GO
  if (alignment == 0)
    return __libc_malloc(size);
  else
    return __libc_memalign(alignment, size);
#else
  // Windows does not provide __libc_memalign/posix_memalign. It provides
  // _aligned_malloc, but the allocated blocks can't be passed to free;
  // they need to be passed to _aligned_free. The InternalAlloc interface
  // does not account for such a requirement. Alignment does not seem to be
  // used anywhere in the runtime, so just call __libc_malloc for now.
  DCHECK_EQ(alignment, 0);
  return __libc_malloc(size);
#endif
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  (void)cache;
  return __libc_realloc(ptr, size);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  (void)cache;
  __libc_free(ptr);
}

InternalAllocator *internal_allocator() {
  // The libc-backed build has no combined allocator instance.
  return nullptr;
}

#else  // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)

static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;

static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;
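
// The internal allocator lives in the aligned placeholder buffer above and is
// constructed lazily on first use. Initialization is double-checked locking:
// the relaxed re-load under internal_alloc_init_mu pairs with the acquire
// load on the fast path and the release store after Init(), so Init() runs
// exactly once even with concurrent first callers.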

InternalAllocator *internal_allocator() {
  InternalAllocator *internal_allocator_instance =
      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
    SpinMutexLock l(&internal_alloc_init_mu);
    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
      internal_allocator_instance->Init(/* may_return_null */ false);
      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
    }
  }
  return internal_allocator_instance;
}

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  if (alignment == 0) alignment = 8;
  if (cache == nullptr) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Allocate(&internal_allocator_cache, size,
                                          alignment, false);
  }
  return internal_allocator()->Allocate(cache, size, alignment, false);
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  uptr alignment = 8;
  if (cache == nullptr) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
                                            size, alignment);
  }
  return internal_allocator()->Reallocate(cache, ptr, size, alignment);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  if (cache == nullptr) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
  }
  internal_allocator()->Deallocate(cache, ptr);
}

#endif  // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
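
// Each block returned by InternalAlloc/InternalRealloc is preceded by a u64
// header holding kBlockMagic. InternalFree CHECKs the magic before freeing,
// which catches frees of pointers that did not come from InternalAlloc, and
// zeroes it, so a double free also fails the CHECK.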
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;

void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
  if (size + sizeof(u64) < size)
    return nullptr;
  void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
  if (p == nullptr)
    return nullptr;
  ((u64*)p)[0] = kBlockMagic;
  return (char*)p + sizeof(u64);
}

void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
  if (addr == nullptr)
    return InternalAlloc(size, cache);
  if (size + sizeof(u64) < size)
    return nullptr;
  addr = (char*)addr - sizeof(u64);
  size = size + sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  void *p = RawInternalRealloc(addr, size, cache);
  if (p == nullptr)
    return nullptr;
  return (char*)p + sizeof(u64);
}

void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
  if (CallocShouldReturnNullDueToOverflow(count, size))
    return internal_allocator()->ReturnNullOrDieOnBadRequest();
  void *p = InternalAlloc(count * size, cache);
  if (p) internal_memset(p, 0, count * size);
  return p;
}

void InternalFree(void *addr, InternalAllocatorCache *cache) {
  if (addr == nullptr)
    return;
  addr = (char*)addr - sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  ((u64*)addr)[0] = 0;
  RawInternalFree(addr, cache);
}
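
// Illustrative usage from runtime code (a sketch; assumes the default cache
// and alignment arguments declared in sanitizer_allocator_internal.h):
//   void *buf = InternalAlloc(128);   // block is prefixed with kBlockMagic
//   buf = InternalRealloc(buf, 256);  // header is preserved and re-checked
//   InternalFree(buf);                // CHECKs the magic, then frees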

// LowLevelAllocator
static LowLevelAllocateCallback low_level_alloc_callback;

void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, 8);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    uptr size_to_allocate = Max(size, GetPageSizeCached());
    allocated_current_ =
        (char*)MmapOrDie(size_to_allocate, __func__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_,
                               size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}
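
// Design note: LowLevelAllocator is a bump allocator over pages obtained with
// MmapOrDie. There is no deallocation path; everything it hands out lives for
// the rest of the process, which suits the small, long-lived runtime metadata
// it backs.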

void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}
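
// Hypothetical registration example: a tool that wants to observe (or poison)
// the raw mmap'ed chunks could do:
//   static void OnLowLevelAlloc(uptr ptr, uptr size) { /* record region */ }
//   SetLowLevelAllocateCallback(OnLowLevelAlloc);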

bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
  if (!size) return false;
  uptr max = (uptr)-1L;
  return (max / size) < n;
}
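
// Worked example for the overflow check above: with 64-bit uptr,
// max = 2^64 - 1; for size = 16 and n = 2^61, max / size = 2^60 - 1 < n, so
// n * size would overflow and calloc must fail. The division sidesteps the
// overflowing multiply: n * size > max exactly when n > max / size.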

static atomic_uint8_t reporting_out_of_memory = {0};

bool IsReportingOOM() { return atomic_load_relaxed(&reporting_out_of_memory); }

void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
  if (out_of_memory) atomic_store_relaxed(&reporting_out_of_memory, 1);
  Report("%s's allocator is terminating the process instead of returning 0\n",
         SanitizerToolName);
  Report("If you don't like this behavior set allocator_may_return_null=1\n");
  CHECK(0);
  Die();
}

} // namespace __sanitizer