//===-- asan_allocator.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//
16 #include "asan_allocator.h"
17 #include "asan_mapping.h"
18 #include "asan_poisoning.h"
19 #include "asan_report.h"
20 #include "asan_stack.h"
21 #include "asan_thread.h"
22 #include "sanitizer_common/sanitizer_allocator_interface.h"
23 #include "sanitizer_common/sanitizer_flags.h"
24 #include "sanitizer_common/sanitizer_internal_defs.h"
25 #include "sanitizer_common/sanitizer_list.h"
26 #include "sanitizer_common/sanitizer_stackdepot.h"
27 #include "sanitizer_common/sanitizer_quarantine.h"
28 #include "lsan/lsan_common.h"
// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocation larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}
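// Illustrative mapping between the 3-bit log and the redzone size in bytes
// (rz_size == 16 << rz_log):
//   rz_log:  0   1   2   3    4    5    6     7
//   rz_size: 16  32  64  128  256  512  1024  2048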
static AsanAllocator &get_allocator();

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.
//
// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kAllocBegMagic = 0xCC6E96B9;
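// For example, a memalign-ed allocation can push the user region (and thus the
// ChunkHeader) far past the block start. In that case the first two words of
// the block hold kAllocBegMagic and the ChunkHeader address, and
// GetAsanChunk() below follows that pointer instead of assuming the header
// sits at a fixed offset from the block start.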
struct ChunkHeader {
  // 1-st 8 bytes.
  u32 chunk_state       : 8;   // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  u32 lsan_tag          : 2;
  // 2-nd 8 bytes
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};
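// Lifecycle as implemented below: Allocate() publishes CHUNK_ALLOCATED as its
// last metadata write, Deallocate() atomically flips CHUNK_ALLOCATED to
// CHUNK_QUARANTINE, and QuarantineCallback::Recycle() resets the state to
// CHUNK_AVAILABLE before returning the block to the underlying allocator.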
struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize(bool locked_version = false) {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(
               get_allocator().GetMetaData(AllocBeg(locked_version)));
  }
  void *AllocBeg(bool locked_version = false) {
    if (from_memalign) {
      if (locked_version)
        return get_allocator().GetBlockBeginFastLocked(
            reinterpret_cast<void *>(this));
      return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
    }
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  bool AddrIsInside(uptr addr, bool locked_version = false) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
  }
};
struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (p != m) {
      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetAsanChunk lookup.
      alloc_magic[0] = 0;
      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    get_allocator().Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    return get_allocator().Allocate(cache_, size, 1, false);
  }

  void Deallocate(void *p) {
    get_allocator().Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}

void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}
// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}
void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
  quarantine_size_mb = f->quarantine_size_mb;
  min_redzone = f->redzone;
  max_redzone = f->max_redzone;
  may_return_null = cf->allocator_may_return_null;
  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
  release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}

void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
  f->quarantine_size_mb = quarantine_size_mb;
  f->redzone = min_redzone;
  f->max_redzone = max_redzone;
  cf->allocator_may_return_null = may_return_null;
  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
  cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}
struct Allocator {
  static const uptr kMaxAllowedMallocSize =
      FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);
  static const uptr kMaxThreadLocalQuarantine =
      FIRST_32_SECOND_64(1 << 18, 1 << 20);

  AsanAllocator allocator;
  AsanQuarantine quarantine;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;
  QuarantineCache fallback_quarantine_cache;

  // ------------------- Options --------------------------
  atomic_uint16_t min_redzone;
  atomic_uint16_t max_redzone;
  atomic_uint8_t alloc_dealloc_mismatch;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized)
      : quarantine(LINKER_INITIALIZED),
        fallback_quarantine_cache(LINKER_INITIALIZED) {}
  void CheckOptions(const AllocatorOptions &options) const {
    CHECK_GE(options.min_redzone, 16);
    CHECK_GE(options.max_redzone, options.min_redzone);
    CHECK_LE(options.max_redzone, 2048);
    CHECK(IsPowerOfTwo(options.min_redzone));
    CHECK(IsPowerOfTwo(options.max_redzone));
  }

  void SharedInitCode(const AllocatorOptions &options) {
    CheckOptions(options);
    quarantine.Init((uptr)options.quarantine_size_mb << 20,
                    kMaxThreadLocalQuarantine);
    atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
                 memory_order_release);
    atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  }

  void Initialize(const AllocatorOptions &options) {
    allocator.Init(options.may_return_null, options.release_to_os_interval_ms);
    SharedInitCode(options);
  }
  void RePoisonChunk(uptr chunk) {
    // This could be a user-facing chunk (with redzones), or some internal
    // housekeeping chunk, like TransferBatch. Start by assuming the former.
    AsanChunk *ac = GetAsanChunk((void *)chunk);
    uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
    uptr beg = ac->Beg();
    uptr end = ac->Beg() + ac->UsedSize(true);
    uptr chunk_end = chunk + allocated_size;
    if (chunk < beg && beg < end && end <= chunk_end) {
      // Looks like a valid AsanChunk. Or maybe not. Be conservative and only
      // poison the redzones.
      PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
      uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
      FastPoisonShadowPartialRightRedzone(
          end_aligned_down, end - end_aligned_down,
          chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
    } else {
      // This cannot be an AsanChunk. Poison everything: it may be reused as
      // an AsanChunk later.
      PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
    }
  }
  void ReInitialize(const AllocatorOptions &options) {
    allocator.SetMayReturnNull(options.may_return_null);
    allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
    SharedInitCode(options);

    // Poison the redzones of all existing allocations.
    if (CanPoisonMemory()) {
      allocator.ForceLock();
      allocator.ForEachChunk(
          [](uptr chunk, void *alloc) {
            ((Allocator *)alloc)->RePoisonChunk(chunk);
          },
          this);
      allocator.ForceUnlock();
    }
  }
  void GetOptions(AllocatorOptions *options) const {
    options->quarantine_size_mb = quarantine.GetSize() >> 20;
    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    options->may_return_null = allocator.MayReturnNull();
    options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
    options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
  }
  // -------------------- Helper methods. -------------------------
  uptr ComputeRZLog(uptr user_requested_size) {
    u32 rz_log =
      user_requested_size <= 64        - 16   ? 0 :
      user_requested_size <= 128       - 32   ? 1 :
      user_requested_size <= 512       - 64   ? 2 :
      user_requested_size <= 4096      - 128  ? 3 :
      user_requested_size <= (1 << 14) - 256  ? 4 :
      user_requested_size <= (1 << 15) - 512  ? 5 :
      user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
    u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
    u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
    return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
  }
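  // Worked example: for a 100-byte request, 100 > 128 - 32 but
  // 100 <= 512 - 64, so rz_log is 2 (a 64-byte redzone). With, say,
  // redzone=16 and max_redzone=2048 the clamp leaves it unchanged:
  // Min(Max(2, RZSize2Log(16)), RZSize2Log(2048)) == Min(Max(2, 0), 7) == 2.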
  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
                         AsanChunk *right_chunk) {
    // Prefer an allocated chunk over a freed chunk and a freed chunk
    // over an available chunk.
    if (left_chunk->chunk_state != right_chunk->chunk_state) {
      if (left_chunk->chunk_state == CHUNK_ALLOCATED)
        return left_chunk;
      if (right_chunk->chunk_state == CHUNK_ALLOCATED)
        return right_chunk;
      if (left_chunk->chunk_state == CHUNK_QUARANTINE)
        return left_chunk;
      if (right_chunk->chunk_state == CHUNK_QUARANTINE)
        return right_chunk;
    }
    // Same chunk_state: choose based on offset.
    sptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }
  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type, bool can_fill) {
    if (UNLIKELY(!asan_inited))
      AsanInitFromRtl();
    Flags &fl = *flags();
    CHECK(stack);
    const uptr min_alignment = SHADOW_GRANULARITY;
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rz_log = ComputeRZLog(size);
    uptr rz_size = RZLog2Size(rz_log);
    uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    uptr needed_size = rounded_size + rz_size;
    if (alignment > min_alignment)
      needed_size += alignment;
    bool using_primary_allocator = true;
    // If we are allocating from the secondary allocator, there will be no
    // automatic right redzone, so add the right redzone manually.
    if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
      needed_size += rz_size;
      using_primary_allocator = false;
    }
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
      Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
             (void *)size);
      return allocator.ReturnNullOrDieOnBadRequest();
    }
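    // Worked example, assuming SHADOW_GRANULARITY == 8: malloc(100) gets
    // rz_log == 2, so rz_size == 64; rounded_size == RoundUpTo(100, 8) == 104
    // and needed_size == 168. The secondary-allocator path above would add
    // another rz_size bytes for the missing right redzone.
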
    AsanThread *t = GetCurrentThread();
    void *allocated;
    bool check_rss_limit = true;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated =
          allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated =
          allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
    }

    if (!allocated) return allocator.ReturnNullOrDieOnOOM();

    if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
      // Heap poisoning is enabled, but the allocator provides an unpoisoned
      // chunk. This is possible if CanPoisonMemory() was false for some
      // time, for example, due to flags()->start_disabled.
      // Anyway, poison the block before using it for anything else.
      uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
      PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
    }
    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr beg_plus_redzone = alloc_beg + rz_size;
    uptr user_beg = beg_plus_redzone;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    m->alloc_type = alloc_type;
    m->rz_log = rz_log;
    u32 alloc_tid = t ? t->tid() : 0;
    m->alloc_tid = alloc_tid;
    CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
    m->free_tid = kInvalidTid;
    m->from_memalign = user_beg != beg_plus_redzone;
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
      reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
      reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
    }
    if (using_primary_allocator) {
      CHECK(size);
      m->user_requested_size = size;
      CHECK(allocator.FromPrimary(allocated));
    } else {
      CHECK(!allocator.FromPrimary(allocated));
      m->user_requested_size = SizeClassMap::kMaxSize;
      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
      meta[0] = size;
      meta[1] = chunk_beg;
    }
    m->alloc_context_id = StackDepotPut(*stack);

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, SHADOW_GRANULARITY);
    // Unpoison the bulk of the memory region.
    if (size_rounded_down_to_granularity)
      PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    // Deal with the end of the region if size is not aligned to granularity.
    if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
      u8 *shadow =
          (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
      *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
    }
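    // For example, with SHADOW_GRANULARITY == 8 a 13-byte allocation unpoisons
    // the first 8 bytes and sets the next shadow byte to 13 & 7 == 5 (when
    // poison_partial is on), so only 5 bytes of the last granule are
    // addressable.
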
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_redzones += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    if (can_fill && fl.max_malloc_fill_size) {
      uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
      REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    }
#if CAN_SANITIZE_LEAKS
    m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                 : __lsan::kDirectlyLeaked;
#endif
    // Must be the last mutation of metadata in this function.
    atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
    ASAN_MALLOC_HOOK(res, size);
    return res;
  }
  // Set quarantine flag if chunk is allocated, issue ASan error report on
  // available and quarantined chunks. Return true on success, false otherwise.
  bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
                                              BufferedStackTrace *stack) {
    u8 old_chunk_state = CHUNK_ALLOCATED;
    // Flip the chunk_state atomically to avoid race on double-free.
    if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
                                        CHUNK_QUARANTINE,
                                        memory_order_acquire)) {
      ReportInvalidFree(ptr, old_chunk_state, stack);
      // It's not safe to push a chunk in quarantine on invalid free.
      return false;
    }
    CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
    return true;
  }
  // Expects the chunk to already be marked as quarantined by using
  // AtomicallySetQuarantineFlagIfAllocated.
  void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
                       AllocType alloc_type) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    CHECK_GE(m->alloc_tid, 0);
    if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
      CHECK_EQ(m->free_tid, kInvalidTid);
    AsanThread *t = GetCurrentThread();
    m->free_tid = t ? t->tid() : 0;
    m->free_context_id = StackDepotPut(*stack);
    // Poison the region.
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapFreeMagic);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += m->UsedSize();

    // Push into quarantine.
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      AllocatorCache *ac = GetAllocatorCache(ms);
      quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac), m,
                     m->UsedSize());
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *ac = &fallback_allocator_cache;
      quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac), m,
                     m->UsedSize());
    }
  }
  void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
                  AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0) return;

    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    // Must mark the chunk as quarantined before any changes to its metadata.
    // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
    if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;

    if (m->alloc_type != alloc_type) {
      if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
        ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                                (AllocType)alloc_type);
      }
    }

    if (delete_size && flags()->new_delete_type_mismatch &&
        delete_size != m->UsedSize()) {
      ReportNewDeleteSizeMismatch(p, delete_size, stack);
    }

    QuarantineChunk(m, ptr, stack, alloc_type);
  }
  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
    if (new_ptr) {
      u8 chunk_state = m->chunk_state;
      if (chunk_state != CHUNK_ALLOCATED)
        ReportInvalidFree(old_ptr, chunk_state, stack);
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      // If realloc() races with free(), we may start copying freed memory.
      // However, we will report racy double-free later anyway.
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }
  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (CallocShouldReturnNullDueToOverflow(size, nmemb))
      return allocator.ReturnNullOrDieOnBadRequest();
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
    // If the memory comes from the secondary allocator no need to clear it
    // as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }
  void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
    if (chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }

  void CommitBack(AsanThreadLocalMallocStorage *ms) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac));
    allocator.SwallowCache(ac);
  }
  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  AsanChunk *GetAsanChunk(void *alloc_beg) {
    if (!alloc_beg) return nullptr;
    if (!allocator.FromPrimary(alloc_beg)) {
      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
      AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
      return m;
    }
    uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
    if (alloc_magic[0] == kAllocBegMagic)
      return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
    return reinterpret_cast<AsanChunk *>(alloc_beg);
  }

  AsanChunk *GetAsanChunkByAddr(uptr p) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  // Allocator must be locked when this function is called.
  AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
    void *alloc_beg =
        allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  uptr AllocationSize(uptr p) {
    AsanChunk *m = GetAsanChunkByAddr(p);
    if (!m) return 0;
    if (m->chunk_state != CHUNK_ALLOCATED) return 0;
    if (m->Beg() != p) return 0;
    return m->UsedSize();
  }
  AsanChunkView FindHeapChunkByAddress(uptr addr) {
    AsanChunk *m1 = GetAsanChunkByAddr(addr);
    if (!m1) return AsanChunkView(m1);
    sptr offset = 0;
    if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
      // The address is in the chunk's left redzone, so maybe it is actually
      // a right buffer overflow from the other chunk to the left.
      // Search a bit to the left to see if there is another chunk.
      AsanChunk *m2 = nullptr;
      for (uptr l = 1; l < GetPageSizeCached(); l++) {
        m2 = GetAsanChunkByAddr(addr - l);
        if (m2 == m1) continue;  // Still the same chunk.
        break;
      }
      if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
        m1 = ChooseChunk(addr, m2, m1);
    }
    return AsanChunkView(m1);
  }
  void PrintStats() {
    allocator.PrintStats();
  }

  void ForceLock() {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};

static Allocator instance(LINKER_INITIALIZED);

static AsanAllocator &get_allocator() {
  return instance.allocator;
}
bool AsanChunkView::IsValid() {
  return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
}
bool AsanChunkView::IsAllocated() {
  return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
}
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
AllocType AsanChunkView::GetAllocType() {
  return (AllocType)chunk_->alloc_type;
}

static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}

u32 AsanChunkView::GetAllocStackId() { return chunk_->alloc_context_id; }
u32 AsanChunkView::GetFreeStackId() { return chunk_->free_context_id; }

StackTrace AsanChunkView::GetAllocStack() {
  return GetStackTraceFromId(GetAllocStackId());
}

StackTrace AsanChunkView::GetFreeStack() {
  return GetStackTraceFromId(GetFreeStackId());
}
void InitializeAllocator(const AllocatorOptions &options) {
  instance.Initialize(options);
}

void ReInitializeAllocator(const AllocatorOptions &options) {
  instance.ReInitialize(options);
}

void GetAllocatorOptions(AllocatorOptions *options) {
  instance.GetOptions(options);
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  return instance.FindHeapChunkByAddress(addr);
}

AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
  return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
}

void AsanThreadLocalMallocStorage::CommitBack() {
  instance.CommitBack(this);
}

void PrintInternalAllocatorStats() {
  instance.PrintStats();
}
void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  return instance.Allocate(size, alignment, stack, alloc_type, true);
}

void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, stack, alloc_type);
}

void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
                     AllocType alloc_type) {
  instance.Deallocate(ptr, size, stack, alloc_type);
}

void *asan_malloc(uptr size, BufferedStackTrace *stack) {
  return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
}

void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return instance.Calloc(nmemb, size, stack);
}

void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
  if (size == 0) {
    instance.Deallocate(p, 0, stack, FROM_MALLOC);
    return nullptr;
  }
  return instance.Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  return instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}

void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return instance.Allocate(size, PageSize, stack, FROM_MALLOC, true);
}
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  if (!ptr) return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  instance.ForceLock();
}

void asan_mz_force_unlock() {
  instance.ForceUnlock();
}

void AsanSoftRssLimitExceededCallback(bool exceeded) {
  instance.allocator.SetRssLimitIsExceeded(exceeded);
}

}  // namespace __asan
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::get_allocator().ForceLock();
}

void UnlockAllocator() {
  __asan::get_allocator().ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::get_allocator();
  *end = *begin + sizeof(__asan::get_allocator());
}
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
  if (!m) return 0;
  uptr chunk = m->Beg();
  if (m->chunk_state != __asan::CHUNK_ALLOCATED)
    return 0;
  if (m->AddrIsInside(addr, /*locked_version=*/true))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
                                  addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
  CHECK(m);
  return m->Beg();
}
LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}

bool LsanMetadata::allocated() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->chunk_state == __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize(/*locked_version=*/true);
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->alloc_context_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::get_allocator().ForEachChunk(callback, arg);
}
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
  if (!m) return kIgnoreObjectInvalid;
  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
    if (m->lsan_tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->lsan_tag = __lsan::kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan
// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return instance.AllocationSize(ptr) > 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  if (!p) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = instance.AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif