}
void Initialize(const AllocatorOptions &options) {
- allocator.Init(options.may_return_null, options.release_to_os_interval_ms);
+ SetAllocatorMayReturnNull(options.may_return_null);
+ allocator.Init(options.release_to_os_interval_ms);
SharedInitCode(options);
}
}
void ReInitialize(const AllocatorOptions &options) {
- allocator.SetMayReturnNull(options.may_return_null);
+ SetAllocatorMayReturnNull(options.may_return_null);
allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
SharedInitCode(options);
options->quarantine_size_mb = quarantine.GetSize() >> 20;
options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
- options->may_return_null = allocator.MayReturnNull();
+ options->may_return_null = AllocatorMayReturnNull();
options->alloc_dealloc_mismatch =
atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
if (UNLIKELY(!asan_inited))
AsanInitFromRtl();
if (RssLimitExceeded())
- return allocator.ReturnNullOrDieOnOOM();
+ return AsanAllocator::FailureHandler::OnOOM();
Flags &fl = *flags();
CHECK(stack);
const uptr min_alignment = SHADOW_GRANULARITY;
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
(void*)size);
- return allocator.ReturnNullOrDieOnBadRequest();
+ return AsanAllocator::FailureHandler::OnBadRequest();
}
AsanThread *t = GetCurrentThread();
AllocatorCache *cache = &fallback_allocator_cache;
allocated = allocator.Allocate(cache, needed_size, 8);
}
-
- if (!allocated) return allocator.ReturnNullOrDieOnOOM();
+ if (!allocated)
+ return nullptr;
if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
// Heap poisoning is enabled, but the allocator provides an unpoisoned
void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
- return allocator.ReturnNullOrDieOnBadRequest();
+ return AsanAllocator::FailureHandler::OnBadRequest();
void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
// If the memory comes from the secondary allocator no need to clear it
// as it comes directly from mmap.
static THREADLOCAL AllocatorCache cache;
void InitializeAllocator() {
+ SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
allocator.InitLinkerInitialized(
- common_flags()->allocator_may_return_null,
common_flags()->allocator_release_to_os_interval_ms);
}
SpinMutexLock l(&internal_alloc_init_mu);
if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
0) {
- internal_allocator_instance->Init(
- /* may_return_null */ false, kReleaseToOSIntervalNever);
+ internal_allocator_instance->Init(kReleaseToOSIntervalNever);
atomic_store(&internal_allocator_initialized, 1, memory_order_release);
}
}
void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
if (CallocShouldReturnNullDueToOverflow(count, size))
- return internal_allocator()->ReturnNullOrDieOnBadRequest();
+ return InternalAllocator::FailureHandler::OnBadRequest();
void *p = InternalAlloc(count * size, cache);
if (p) internal_memset(p, 0, count * size);
return p;
return (max / size) < n;
}
-static atomic_uint8_t reporting_out_of_memory = {0};
+static atomic_uint8_t allocator_out_of_memory = {0};
+static atomic_uint8_t allocator_may_return_null = {0};
-bool IsReportingOOM() { return atomic_load_relaxed(&reporting_out_of_memory); }
+bool IsAllocatorOutOfMemory() {
+ return atomic_load_relaxed(&allocator_out_of_memory);
+}
-void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
- if (out_of_memory) atomic_store_relaxed(&reporting_out_of_memory, 1);
+// Prints an error message and kills the program.
+void NORETURN ReportAllocatorCannotReturnNull() {
Report("%s's allocator is terminating the process instead of returning 0\n",
SanitizerToolName);
Report("If you don't like this behavior set allocator_may_return_null=1\n");
Die();
}
+bool AllocatorMayReturnNull() {
+ return atomic_load(&allocator_may_return_null, memory_order_relaxed);
+}
+
+void SetAllocatorMayReturnNull(bool may_return_null) {
+ atomic_store(&allocator_may_return_null, may_return_null,
+ memory_order_relaxed);
+}
+
+void *ReturnNullOrDieOnFailure::OnBadRequest() {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportAllocatorCannotReturnNull();
+}
+
+void *ReturnNullOrDieOnFailure::OnOOM() {
+ atomic_store_relaxed(&allocator_out_of_memory, 1);
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportAllocatorCannotReturnNull();
+}
+
+void *DieOnFailure::OnBadRequest() {
+ ReportAllocatorCannotReturnNull();
+}
+
+void *DieOnFailure::OnOOM() {
+ atomic_store_relaxed(&allocator_out_of_memory, 1);
+ ReportAllocatorCannotReturnNull();
+}
+
} // namespace __sanitizer
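Taken together, the new surface is one global flag plus two policy structs. A minimal usage sketch, assuming a tool's startup and allocation paths (the call sites below are illustrative, not part of this patch):

  // At startup: cache the flag once; tests or ASan on Android can flip it later.
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);

  // In an allocation path: route failures through the policy's static hooks
  // instead of per-allocator state.
  if (CallocShouldReturnNullDueToOverflow(count, size))
    return ReturnNullOrDieOnFailure::OnBadRequest();  // nullptr or Die()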
namespace __sanitizer {
-// Returns true if ReportAllocatorCannotReturnNull(true) was called.
-// Can be use to avoid memory hungry operations.
-bool IsReportingOOM();
+// Since flags are immutable and allocator behavior can be changed at runtime
+// (unit tests or ASan on Android are some examples), the
+// allocator_may_return_null flag value is cached here and can be altered
+// later.
+bool AllocatorMayReturnNull();
+void SetAllocatorMayReturnNull(bool may_return_null);
-// Prints error message and kills the program.
-void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory);
+// Allocator failure handling policies:
+// Implements the AllocatorMayReturnNull policy: returns null when the flag is
+// set, dies otherwise.
+struct ReturnNullOrDieOnFailure {
+ static void *OnBadRequest();
+ static void *OnOOM();
+};
+// Always dies on failure.
+struct DieOnFailure {
+ static void *OnBadRequest();
+ static void *OnOOM();
+};
+
+// Returns true if the allocator has detected an OOM condition. Can be used to
+// avoid memory-hungry operations. Set when an OnOOM() failure handler runs.
+bool IsAllocatorOutOfMemory();
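To make the two policies' contract concrete, a hedged sketch of what callers can expect (illustrative, not part of the patch):

  SetAllocatorMayReturnNull(true);
  void *p = ReturnNullOrDieOnFailure::OnBadRequest();  // returns nullptr
  void *q = ReturnNullOrDieOnFailure::OnOOM();         // returns nullptr and
  CHECK(IsAllocatorOutOfMemory());                     // records the OOM state
  // With allocator_may_return_null=0, both hooks report and Die();
  // DieOnFailure::OnBadRequest()/OnOOM() terminate unconditionally.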
// Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
class SecondaryAllocator> // NOLINT
class CombinedAllocator {
public:
- void InitCommon(bool may_return_null, s32 release_to_os_interval_ms) {
- primary_.Init(release_to_os_interval_ms);
- atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
- }
+ typedef typename SecondaryAllocator::FailureHandler FailureHandler;
- void InitLinkerInitialized(
- bool may_return_null, s32 release_to_os_interval_ms) {
- secondary_.InitLinkerInitialized(may_return_null);
+ void InitLinkerInitialized(s32 release_to_os_interval_ms) {
+ primary_.Init(release_to_os_interval_ms);
+ secondary_.InitLinkerInitialized();
stats_.InitLinkerInitialized();
- InitCommon(may_return_null, release_to_os_interval_ms);
}
- void Init(bool may_return_null, s32 release_to_os_interval_ms) {
- secondary_.Init(may_return_null);
+ void Init(s32 release_to_os_interval_ms) {
+ primary_.Init(release_to_os_interval_ms);
+ secondary_.Init();
stats_.Init();
- InitCommon(may_return_null, release_to_os_interval_ms);
}
void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
if (size == 0)
size = 1;
if (size + alignment < size)
- return ReturnNullOrDieOnBadRequest();
+ return FailureHandler::OnBadRequest();
uptr original_size = size;
// If alignment requirements are to be fulfilled by the frontend allocator
// rather than by the primary or secondary, passing an alignment lower than
// alignment check.
if (alignment > 8)
size = RoundUpTo(size, alignment);
- void *res;
- bool from_primary = primary_.CanAllocate(size, alignment);
// The primary allocator should return a 2^x aligned allocation when
// requested 2^x bytes, hence using the rounded up 'size' when being
// serviced by the primary (this is no longer true when the primary is
// using a non-fixed base address). The secondary takes care of the
// alignment without such requirement, and allocating 'size' would use
// extraneous memory, so we employ 'original_size'.
- if (from_primary)
+ void *res;
+ if (primary_.CanAllocate(size, alignment))
res = cache->Allocate(&primary_, primary_.ClassID(size));
else
res = secondary_.Allocate(&stats_, original_size, alignment);
+ if (!res)
+ return FailureHandler::OnOOM();
if (alignment > 8)
CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
return res;
}
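Since Allocate() now invokes FailureHandler::OnOOM() itself when neither the primary nor the secondary can satisfy the request, call sites only need a null check; a hedged sketch of the simplified caller (it mirrors the ASan hunk above):

  void *allocated = allocator.Allocate(cache, needed_size, 8);
  if (!allocated)
    return nullptr;  // the failure policy already ran inside Allocate()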
- bool MayReturnNull() const {
- return atomic_load(&may_return_null_, memory_order_acquire);
- }
-
- void *ReturnNullOrDieOnBadRequest() {
- if (MayReturnNull())
- return nullptr;
- ReportAllocatorCannotReturnNull(false);
- }
-
- void *ReturnNullOrDieOnOOM() {
- if (MayReturnNull())
- return nullptr;
- ReportAllocatorCannotReturnNull(true);
- }
-
- void SetMayReturnNull(bool may_return_null) {
- secondary_.SetMayReturnNull(may_return_null);
- atomic_store(&may_return_null_, may_return_null, memory_order_release);
- }
-
s32 ReleaseToOSIntervalMs() const {
return primary_.ReleaseToOSIntervalMs();
}
PrimaryAllocator primary_;
SecondaryAllocator secondary_;
AllocatorGlobalStats stats_;
- atomic_uint8_t may_return_null_;
};
InternalAllocatorCache;
typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
- LargeMmapAllocator<> > InternalAllocator;
+ LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure>
+ > InternalAllocator;
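The combined allocator picks its policy up from the secondary via the FailureHandler typedef added above, so this typedef is what makes internal-allocator failures fatal. A hedged sketch of the contrast (the MaybeNullSecondary name is hypothetical):

  // InternalAllocator::FailureHandler is DieOnFailure, so its OnOOM() and
  // OnBadRequest() never return. A tool that keeps the default template
  // argument gets the flag-respecting policy instead:
  typedef LargeMmapAllocator<NoOpMapUnmapCallback> MaybeNullSecondary;
  // MaybeNullSecondary::FailureHandler is ReturnNullOrDieOnFailure.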
void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,
uptr alignment = 0);
// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
-template <class MapUnmapCallback = NoOpMapUnmapCallback>
+template <class MapUnmapCallback = NoOpMapUnmapCallback,
+ class FailureHandlerT = ReturnNullOrDieOnFailure>
class LargeMmapAllocator {
public:
- void InitLinkerInitialized(bool may_return_null) {
+ typedef FailureHandlerT FailureHandler;
+
+ void InitLinkerInitialized() {
page_size_ = GetPageSizeCached();
- atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
}
- void Init(bool may_return_null) {
+ void Init() {
internal_memset(this, 0, sizeof(*this));
- InitLinkerInitialized(may_return_null);
+ InitLinkerInitialized();
}
void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
map_size += alignment;
// Overflow.
if (map_size < size)
- return ReturnNullOrDieOnBadRequest();
+ return FailureHandler::OnBadRequest();
uptr map_beg = reinterpret_cast<uptr>(
MmapOrDieOnFatalError(map_size, "LargeMmapAllocator"));
if (!map_beg)
- return ReturnNullOrDieOnOOM();
+ return FailureHandler::OnOOM();
CHECK(IsAligned(map_beg, page_size_));
MapUnmapCallback().OnMap(map_beg, map_size);
uptr map_end = map_beg + map_size;
return reinterpret_cast<void*>(res);
}
- bool MayReturnNull() const {
- return atomic_load(&may_return_null_, memory_order_acquire);
- }
-
- void *ReturnNullOrDieOnBadRequest() {
- if (MayReturnNull()) return nullptr;
- ReportAllocatorCannotReturnNull(false);
- }
-
- void *ReturnNullOrDieOnOOM() {
- if (MayReturnNull()) return nullptr;
- ReportAllocatorCannotReturnNull(true);
- }
-
- void SetMayReturnNull(bool may_return_null) {
- atomic_store(&may_return_null_, may_return_null, memory_order_release);
- }
-
void Deallocate(AllocatorStats *stat, void *p) {
Header *h = GetHeader(p);
{
struct Stats {
uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
} stats;
- atomic_uint8_t may_return_null_;
SpinMutex mutex_;
};
VReport(2, "Symbolizer is disabled.\n");
return;
}
- if (IsReportingOOM()) {
+ if (IsAllocatorOutOfMemory()) {
VReport(2, "Cannot use internal symbolizer: out of memory\n");
} else if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {
VReport(2, "Using internal symbolizer.\n");
}
void InitializeAllocator() {
- allocator()->Init(
- common_flags()->allocator_may_return_null,
- common_flags()->allocator_release_to_os_interval_ms);
+ SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+ allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
}
void InitializeAllocatorLate() {
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
- return allocator()->ReturnNullOrDieOnBadRequest();
+ return Allocator::FailureHandler::OnBadRequest();
void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
if (p == 0)
return 0;
void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
if (CallocShouldReturnNullDueToOverflow(size, n))
- return allocator()->ReturnNullOrDieOnBadRequest();
+ return Allocator::FailureHandler::OnBadRequest();
void *p = user_alloc(thr, pc, n * size);
if (p)
internal_memset(p, 0, n * size);