Reviewed by eugenis offline, as reviews.llvm.org is down.
llvm-svn: 282805
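
For orientation before the diff: the patch splits each allocator's single ReturnNullOrDie() into ReturnNullOrDieOnBadRequest() (the request itself was invalid: calloc overflow, oversized or over-aligned allocation) and ReturnNullOrDieOnOOM() (the request was sane but memory ran out, including the RSS-limit case), and latches the OOM case in a flag that memory-hungry code can poll via IsReportingOOM(). A minimal standalone sketch of the pattern, using std::atomic in place of the sanitizer-internal atomics; it mirrors the diff but is not the sanitizer code itself:

    #include <atomic>
    #include <cstdio>
    #include <cstdlib>

    // Latched on the OOM death path so that other components can detect it
    // and avoid memory-hungry work while the process is going down.
    static std::atomic<bool> reporting_oom{false};

    bool IsReportingOOM() { return reporting_oom.load(std::memory_order_relaxed); }

    [[noreturn]] void DieCannotReturnNull(bool out_of_memory) {
      if (out_of_memory) reporting_oom.store(true, std::memory_order_relaxed);
      std::fprintf(stderr, "allocator is terminating instead of returning 0\n");
      std::abort();
    }

    struct Allocator {
      bool may_return_null = false;  // allocator_may_return_null=1 analogue

      // The caller asked for something impossible (overflow, huge size, ...).
      void *ReturnNullOrDieOnBadRequest() {
        if (may_return_null) return nullptr;
        DieCannotReturnNull(/*out_of_memory=*/false);
      }

      // The request was sane, but memory genuinely ran out.
      void *ReturnNullOrDieOnOOM() {
        if (may_return_null) return nullptr;
        DieCannotReturnNull(/*out_of_memory=*/true);
      }
    };

    int main() {
      Allocator a;
      a.may_return_null = true;
      if (!a.ReturnNullOrDieOnBadRequest())
        std::puts("bad request: got null instead of dying");
      std::printf("IsReportingOOM() = %d\n", IsReportingOOM());
    }
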
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
size);
- return allocator.ReturnNullOrDie();
+ return allocator.ReturnNullOrDieOnBadRequest();
}
AsanThread *t = GetCurrentThread();
allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
}
- if (!allocated)
- return allocator.ReturnNullOrDie();
+ if (!allocated) return allocator.ReturnNullOrDieOnOOM();
if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
// Heap poisoning is enabled, but the allocator provides an unpoisoned
void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
- return allocator.ReturnNullOrDie();
+ return allocator.ReturnNullOrDieOnBadRequest();
void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
// If the memory comes from the secondary allocator no need to clear it
// as it comes directly from mmap.
if (size > kMaxAllowedMallocSize) {
Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
(void *)size);
- return allocator.ReturnNullOrDie();
+ return allocator.ReturnNullOrDieOnBadRequest();
}
MsanThread *t = GetCurrentThread();
void *allocated;
void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
- return allocator.ReturnNullOrDie();
+ return allocator.ReturnNullOrDieOnBadRequest();
return MsanReallocate(stack, nullptr, nmemb * size, sizeof(u64), true);
}
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator.h"
+
#include "sanitizer_allocator_internal.h"
+#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
namespace __sanitizer {
void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
if (CallocShouldReturnNullDueToOverflow(count, size))
- return internal_allocator()->ReturnNullOrDie();
+ return internal_allocator()->ReturnNullOrDieOnBadRequest();
void *p = InternalAlloc(count * size, cache);
if (p) internal_memset(p, 0, count * size);
return p;
return (max / size) < n;
}
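
An aside on the overflow predicate whose tail is visible just above (return (max / size) < n;): CallocShouldReturnNullDueToOverflow decides whether n * size would overflow without performing the multiplication, by comparing n against the largest multiplier that still fits. A standalone equivalent, assuming 64-bit sizes (hypothetical name):

    #include <cstdint>

    // n * size overflows a 64-bit value iff size != 0 and n > UINT64_MAX / size.
    bool CallocWouldOverflow(uint64_t n, uint64_t size) {
      if (size == 0) return false;  // 0 * n never overflows
      return n > UINT64_MAX / size;
    }
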
-void NORETURN ReportAllocatorCannotReturnNull() {
+static atomic_uint8_t reporting_out_of_memory = {0};
+
+bool IsReportingOOM() { return atomic_load_relaxed(&reporting_out_of_memory); }
+
+void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
+ if (out_of_memory) atomic_store_relaxed(&reporting_out_of_memory, 1);
Report("%s's allocator is terminating the process instead of returning 0\n",
SanitizerToolName);
Report("If you don't like this behavior set allocator_may_return_null=1\n");
namespace __sanitizer {
+// Returns true if ReportAllocatorCannotReturnNull(true) was called.
+// Can be used to avoid memory-hungry operations.
+bool IsReportingOOM();
+
// Prints error message and kills the program.
-void NORETURN ReportAllocatorCannotReturnNull();
+void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory);
+
// Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
void OnMap(uptr p, uptr size) const { }
// Returning 0 on malloc(0) may break a lot of code.
if (size == 0)
size = 1;
- if (size + alignment < size)
- return ReturnNullOrDie();
- if (check_rss_limit && RssLimitIsExceeded())
- return ReturnNullOrDie();
+ if (size + alignment < size) return ReturnNullOrDieOnBadRequest();
+ if (check_rss_limit && RssLimitIsExceeded()) return ReturnNullOrDieOnOOM();
if (alignment > 8)
size = RoundUpTo(size, alignment);
void *res;
return atomic_load(&may_return_null_, memory_order_acquire);
}
- void *ReturnNullOrDie() {
+ void *ReturnNullOrDieOnBadRequest() {
if (MayReturnNull())
return nullptr;
- ReportAllocatorCannotReturnNull();
+ ReportAllocatorCannotReturnNull(false);
+ }
+
+ void *ReturnNullOrDieOnOOM() {
+ if (MayReturnNull()) return nullptr;
+ ReportAllocatorCannotReturnNull(true);
}
void SetMayReturnNull(bool may_return_null) {
if (alignment > page_size_)
map_size += alignment;
// Overflow.
- if (map_size < size)
- return ReturnNullOrDie();
+ if (map_size < size) return ReturnNullOrDieOnBadRequest();
uptr map_beg = reinterpret_cast<uptr>(
MmapOrDie(map_size, "LargeMmapAllocator"));
CHECK(IsAligned(map_beg, page_size_));
return reinterpret_cast<void*>(res);
}
- void *ReturnNullOrDie() {
- if (atomic_load(&may_return_null_, memory_order_acquire))
- return nullptr;
- ReportAllocatorCannotReturnNull();
+ bool MayReturnNull() const {
+ return atomic_load(&may_return_null_, memory_order_acquire);
+ }
+
+ void *ReturnNullOrDieOnBadRequest() {
+ if (MayReturnNull()) return nullptr;
+ ReportAllocatorCannotReturnNull(false);
+ }
+
+ void *ReturnNullOrDieOnOOM() {
+ if (MayReturnNull()) return nullptr;
+ ReportAllocatorCannotReturnNull(true);
}
void SetMayReturnNull(bool may_return_null) {
VReport(2, "Symbolizer is disabled.\n");
return;
}
- if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {
+ if (IsReportingOOM()) {
+ VReport(2, "Cannot use internal symbolizer: out of memory\n");
+ } else if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {
VReport(2, "Using internal symbolizer.\n");
list->push_back(tool);
return;
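
The symbolizer hunk above is the first consumer of the new flag: bringing up the in-process symbolizer allocates from the internal allocator, which is pointless when that allocator is the reason the process is dying. The same gating pattern in isolation (hypothetical names, relying only on the IsReportingOOM() declaration from the sketch near the top):

    bool IsReportingOOM();  // provided by the allocator layer

    void ChooseSymbolizerTool() {
      if (IsReportingOOM()) {
        // Skip the memory-hungry option and fall back to a cheaper one.
        return;
      }
      // ... otherwise construct the internal symbolizer, allocate buffers, etc.
    }
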
dieWithMessage("ERROR: malloc alignment is not a power of 2\n");
}
if (Alignment > MaxAlignment)
- return BackendAllocator.ReturnNullOrDie();
+ return BackendAllocator.ReturnNullOrDieOnBadRequest();
if (Alignment < MinAlignment)
Alignment = MinAlignment;
if (Size == 0)
Size = 1;
if (Size >= MaxAllowedMallocSize)
- return BackendAllocator.ReturnNullOrDie();
+ return BackendAllocator.ReturnNullOrDieOnBadRequest();
uptr RoundedSize = RoundUpTo(Size, MinAlignment);
uptr ExtraBytes = ChunkHeaderSize;
if (Alignment > MinAlignment)
ExtraBytes += Alignment;
uptr NeededSize = RoundedSize + ExtraBytes;
if (NeededSize >= MaxAllowedMallocSize)
- return BackendAllocator.ReturnNullOrDie();
+ return BackendAllocator.ReturnNullOrDieOnBadRequest();
void *Ptr;
if (LIKELY(!ThreadTornDown)) {
MinAlignment);
}
if (!Ptr)
- return BackendAllocator.ReturnNullOrDie();
+ return BackendAllocator.ReturnNullOrDieOnOOM();
// If requested, we will zero out the entire contents of the returned chunk.
if (ZeroContents && BackendAllocator.FromPrimary(Ptr))
initThread();
uptr Total = NMemB * Size;
if (Size != 0 && Total / Size != NMemB) // Overflow check
- return BackendAllocator.ReturnNullOrDie();
+ return BackendAllocator.ReturnNullOrDieOnBadRequest();
void *Ptr = allocate(Total, MinAlignment, FromMalloc);
// If ZeroContents, the content of the chunk has already been zero'd out.
if (!ZeroContents && Ptr && BackendAllocator.FromPrimary(Ptr))
return reinterpret_cast<void *>(Ptr);
}
- void *ReturnNullOrDie() {
+ void *ReturnNullOrDieOnBadRequest() {
if (atomic_load(&MayReturnNull, memory_order_acquire))
return nullptr;
- ReportAllocatorCannotReturnNull();
+ ReportAllocatorCannotReturnNull(false);
+ }
+
+ void *ReturnNullOrDieOnOOM() {
+ if (atomic_load(&MayReturnNull, memory_order_acquire))
+ return nullptr;
+ ReportAllocatorCannotReturnNull(true);
}
void SetMayReturnNull(bool AllocatorMayReturnNull) {
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
- return allocator()->ReturnNullOrDie();
+ return allocator()->ReturnNullOrDieOnBadRequest();
void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
if (p == 0)
return 0;
void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
if (CallocShouldReturnNullDueToOverflow(size, n))
- return allocator()->ReturnNullOrDie();
+ return allocator()->ReturnNullOrDieOnBadRequest();
void *p = user_alloc(thr, pc, n * size);
if (p)
internal_memset(p, 0, n * size);