atomic_load_relaxed(getConstAtomicHeader(Ptr));
*NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
if (UNLIKELY(NewUnpackedHeader->Checksum !=
- computeChecksum(Ptr, NewUnpackedHeader))) {
- dieWithMessage("ERROR: corrupted chunk header at address %p\n", Ptr);
- }
+ computeChecksum(Ptr, NewUnpackedHeader)))
+ dieWithMessage("corrupted chunk header at address %p\n", Ptr);
}
// Packs and stores the header, computing the checksum in the process.
PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
if (UNLIKELY(!atomic_compare_exchange_strong(
getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
- memory_order_relaxed))) {
- dieWithMessage("ERROR: race on chunk header at address %p\n", Ptr);
- }
+ memory_order_relaxed)))
+ dieWithMessage("race on chunk header at address %p\n", Ptr);
}
} // namespace Chunk
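
// The hunks above drop the "ERROR: " prefix from the messages, presumably
// because dieWithMessage() now tags its reports itself, and unbrace the
// single-statement ifs. As a minimal, self-contained sketch of the header
// scheme these checks protect (illustrative field widths and a toy checksum
// standing in for Scudo's CRC32-based one; none of these names are Scudo's):
#include <atomic>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

struct Unpacked {
  uint64_t State : 2;
  uint64_t SizeOrUnusedBytes : 46;
  uint64_t Checksum : 16;
};
static_assert(sizeof(Unpacked) == sizeof(uint64_t), "must pack into one word");

static uint16_t headerChecksum(const void *Ptr, Unpacked H) {
  H.Checksum = 0;  // the checksum never covers itself
  uint64_t Packed;
  std::memcpy(&Packed, &H, sizeof(Packed));
  uint64_t X = reinterpret_cast<uintptr_t>(Ptr) ^ Packed;  // toy mix
  X ^= X >> 32;
  X ^= X >> 16;
  return static_cast<uint16_t>(X);
}

// Mirrors the compare-exchange above: if another thread rewrote the header
// between our load and this store, the strong CAS fails and we die, which is
// exactly the "race on chunk header" condition.
static void storeHeaderOrDie(std::atomic<uint64_t> *Slot, void *Ptr,
                             Unpacked New, Unpacked Old) {
  New.Checksum = headerChecksum(Ptr, New);
  uint64_t NewPacked, OldPacked;
  std::memcpy(&NewPacked, &New, sizeof(NewPacked));
  std::memcpy(&OldPacked, &Old, sizeof(OldPacked));
  if (!Slot->compare_exchange_strong(OldPacked, NewPacked,
                                     std::memory_order_relaxed)) {
    std::fprintf(stderr, "race on chunk header at address %p\n", Ptr);
    std::abort();
  }
}
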
void Recycle(void *Ptr) {
UnpackedHeader Header;
Chunk::loadHeader(Ptr, &Header);
- if (UNLIKELY(Header.State != ChunkQuarantine)) {
- dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
- Ptr);
- }
+ if (UNLIKELY(Header.State != ChunkQuarantine))
+ dieWithMessage("invalid chunk state when recycling address %p\n", Ptr);
Chunk::eraseHeader(Ptr);
void *BackendPtr = Chunk::getBackendPtr(Ptr, &Header);
if (Header.ClassId)
explicit ScudoAllocator(LinkerInitialized)
: AllocatorQuarantine(LINKER_INITIALIZED) {}
- void performSanityChecks() {
+ NOINLINE void performSanityChecks() {
// Verify that the header offset field can hold the maximum offset. In the
// case of the Secondary allocator, it takes care of alignment and the
// offset will always be 0. In the case of the Primary, the worst case
const uptr MaxOffset =
(MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
Header.Offset = MaxOffset;
- if (Header.Offset != MaxOffset) {
- dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
- "header\n");
- }
+ if (Header.Offset != MaxOffset)
+ dieWithMessage("maximum possible offset doesn't fit in header\n");
// Verify that we can fit the maximum size or amount of unused bytes in the
// header. Given that the Secondary fits the allocation to a page, the worst
// case scenario happens in the Primary. It will depend on the second to
// last and last class sizes, as well as the dynamic base of the Primary.
// The following is an over-approximation that works for our needs.
const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
- if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes) {
- dieWithMessage("ERROR: the maximum possible unused bytes doesn't fit in "
- "the header\n");
- }
+ if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes)
+ dieWithMessage("maximum possible unused bytes doesn't fit in header\n");
const uptr LargestClassId = SizeClassMap::kLargestClassID;
Header.ClassId = LargestClassId;
- if (Header.ClassId != LargestClassId) {
- dieWithMessage("ERROR: the largest class ID doesn't fit in the header\n");
- }
+ if (Header.ClassId != LargestClassId)
+ dieWithMessage("largest class ID doesn't fit in header\n");
}
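
// All three checks above use the same trick: write the largest value a field
// must represent into the bitfield and die if it reads back truncated
// (unsigned bitfields wrap modulo 2^width, so the round-trip test is
// well-defined). A standalone illustration with a deliberately narrow,
// made-up field width:
#include <cstdint>
#include <cstdio>

int main() {
  struct Fields {
    uint64_t Offset : 16;  // hypothetical width, for demonstration only
  };
  const uint64_t MaxOffset = (1u << 20) - 1;  // would need 20 bits
  Fields F = {};
  F.Offset = MaxOffset;
  if (F.Offset != MaxOffset) {
    std::fprintf(stderr, "maximum possible offset doesn't fit in header\n");
    return 1;  // fires here: 16 bits cannot round-trip a 20-bit value
  }
  return 0;
}
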
void init() {
// RSS from /proc/self/statm by default. We might want to
// call getrusage directly, even if it's less accurate.
const uptr CurrentRssMb = GetRSS() >> 20;
- if (HardRssLimitMb && HardRssLimitMb < CurrentRssMb) {
- Report("%s: hard RSS limit exhausted (%zdMb vs %zdMb)\n",
- SanitizerToolName, HardRssLimitMb, CurrentRssMb);
- DumpProcessMap();
- Die();
- }
+ if (HardRssLimitMb && UNLIKELY(HardRssLimitMb < CurrentRssMb))
+ dieWithMessage("hard RSS limit exhausted (%zdMb vs %zdMb)\n",
+ HardRssLimitMb, CurrentRssMb);
if (SoftRssLimitMb) {
if (atomic_load_relaxed(&RssLimitExceeded)) {
if (CurrentRssMb <= SoftRssLimitMb)
  atomic_store_relaxed(&RssLimitExceeded, false);
} else {
if (CurrentRssMb > SoftRssLimitMb) {
atomic_store_relaxed(&RssLimitExceeded, true);
- Report("%s: soft RSS limit exhausted (%zdMb vs %zdMb)\n",
- SanitizerToolName, SoftRssLimitMb, CurrentRssMb);
+ Printf("Scudo INFO: soft RSS limit exhausted (%zdMb vs %zdMb)\n",
+ SoftRssLimitMb, CurrentRssMb);
}
}
}
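
// A hedged sketch of the two-tier policy above: the hard limit is fatal,
// while the soft limit only flips RssLimitExceeded (which the allocator can
// consult to fail allocations gracefully) and clears it once RSS drops back
// under the limit. getCurrentRssMb is a stand-in for GetRSS() >> 20, reading
// /proc/self/statm as the comment above suggests (assumes Linux, 4K pages).
#include <atomic>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

static std::atomic<bool> RssLimitExceeded{false};

static uint64_t getCurrentRssMb() {
  unsigned long SizePages = 0, RssPages = 0;
  if (FILE *F = std::fopen("/proc/self/statm", "r")) {
    if (std::fscanf(F, "%lu %lu", &SizePages, &RssPages) != 2)
      RssPages = 0;
    std::fclose(F);
  }
  return (static_cast<uint64_t>(RssPages) * 4096) >> 20;
}

static void checkRssLimits(uint64_t HardMb, uint64_t SoftMb) {
  const uint64_t RssMb = getCurrentRssMb();
  if (HardMb && RssMb > HardMb) {
    std::fprintf(stderr, "hard RSS limit exhausted (%zuMb vs %zuMb)\n",
                 static_cast<size_t>(HardMb), static_cast<size_t>(RssMb));
    std::abort();  // the hard limit is not recoverable
  }
  if (SoftMb) {
    if (RssLimitExceeded.load(std::memory_order_relaxed)) {
      if (RssMb <= SoftMb)  // back under the limit: allow allocations again
        RssLimitExceeded.store(false, std::memory_order_relaxed);
    } else if (RssMb > SoftMb) {
      RssLimitExceeded.store(true, std::memory_order_relaxed);
      std::fprintf(stderr, "soft RSS limit exhausted (%zuMb vs %zuMb)\n",
                   static_cast<size_t>(SoftMb), static_cast<size_t>(RssMb));
    }
  }
}
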
__sanitizer_free_hook(Ptr);
if (UNLIKELY(!Ptr))
return;
- if (UNLIKELY(!Chunk::isAligned(Ptr))) {
- dieWithMessage("ERROR: attempted to deallocate a chunk not properly "
- "aligned at address %p\n", Ptr);
- }
+ if (UNLIKELY(!Chunk::isAligned(Ptr)))
+ dieWithMessage("misaligned pointer when deallocating address %p\n", Ptr);
UnpackedHeader Header;
Chunk::loadHeader(Ptr, &Header);
- if (UNLIKELY(Header.State != ChunkAllocated)) {
- dieWithMessage("ERROR: invalid chunk state when deallocating address "
- "%p\n", Ptr);
- }
+ if (UNLIKELY(Header.State != ChunkAllocated))
+ dieWithMessage("invalid chunk state when deallocating address %p\n", Ptr);
if (DeallocationTypeMismatch) {
// The deallocation type has to match the allocation one.
if (Header.AllocType != Type) {
// With the exception of memalign'd Chunks, that can still be free'd.
- if (Header.AllocType != FromMemalign || Type != FromMalloc) {
- dieWithMessage("ERROR: allocation type mismatch when deallocating "
- "address %p\n", Ptr);
- }
+ if (Header.AllocType != FromMemalign || Type != FromMalloc)
+ dieWithMessage("allocation type mismatch when deallocating address "
+ "%p\n", Ptr);
}
}
const uptr Size = Header.ClassId ? Header.SizeOrUnusedBytes :
Chunk::getUsableSize(Ptr, &Header) - Header.SizeOrUnusedBytes;
if (DeleteSizeMismatch) {
- if (DeleteSize && DeleteSize != Size) {
- dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
+ if (DeleteSize && DeleteSize != Size)
+ dieWithMessage("invalid sized delete when deallocating address %p\n",
Ptr);
- }
}
quarantineOrDeallocateChunk(Ptr, &Header, Size);
}
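
// From the caller's side, each check in the deallocation path maps to a
// distinct bug class. A hypothetical driver; each commented-out line would
// abort a Scudo-instrumented process with the quoted message (the type and
// size checks only fire when DeallocationTypeMismatch and DeleteSizeMismatch
// are enabled):
#include <cstdlib>
#include <new>

void deallocationBugExamples() {
  char *P = static_cast<char *>(std::malloc(32));
  // std::free(P + 1);            // "misaligned pointer when deallocating..."
  // std::free(P); std::free(P);  // 2nd call: "invalid chunk state..."
  int *Q = new int(0);
  // std::free(Q);                // "allocation type mismatch when deallocating..."
  // ::operator delete(Q, 64);    // "invalid sized delete..." (real size is 4)
  ::operator delete(Q, sizeof(int));  // matching sized delete is fine
  std::free(P);
}
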
// Reallocates a chunk. We can save on a new allocation if the new requested
// size still fits in the chunk.
void *reallocate(void *OldPtr, uptr NewSize) {
initThreadMaybe();
- if (UNLIKELY(!Chunk::isAligned(OldPtr))) {
- dieWithMessage("ERROR: attempted to reallocate a chunk not properly "
- "aligned at address %p\n", OldPtr);
- }
+ if (UNLIKELY(!Chunk::isAligned(OldPtr)))
+ dieWithMessage("misaligned address when reallocating address %p\n",
+ OldPtr);
UnpackedHeader OldHeader;
Chunk::loadHeader(OldPtr, &OldHeader);
- if (UNLIKELY(OldHeader.State != ChunkAllocated)) {
- dieWithMessage("ERROR: invalid chunk state when reallocating address "
- "%p\n", OldPtr);
- }
+ if (UNLIKELY(OldHeader.State != ChunkAllocated))
+ dieWithMessage("invalid chunk state when reallocating address %p\n",
+ OldPtr);
if (DeallocationTypeMismatch) {
- if (UNLIKELY(OldHeader.AllocType != FromMalloc)) {
- dieWithMessage("ERROR: allocation type mismatch when reallocating "
- "address %p\n", OldPtr);
- }
+ if (UNLIKELY(OldHeader.AllocType != FromMalloc))
+ dieWithMessage("allocation type mismatch when reallocating address "
+ "%p\n", OldPtr);
}
const uptr UsableSize = Chunk::getUsableSize(OldPtr, &OldHeader);
// The new size still fits in the current chunk, and the size difference
// is reasonable.
UnpackedHeader Header;
Chunk::loadHeader(Ptr, &Header);
// Getting the usable size of a chunk only makes sense if it's allocated.
- if (UNLIKELY(Header.State != ChunkAllocated)) {
- dieWithMessage("ERROR: invalid chunk state when sizing address %p\n",
- Ptr);
- }
+ if (UNLIKELY(Header.State != ChunkAllocated))
+ dieWithMessage("invalid chunk state when sizing address %p\n", Ptr);
return Chunk::getUsableSize(Ptr, &Header);
}
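
// reallocate() above avoids a new allocation whenever the requested size
// still fits in the chunk's usable size; otherwise it falls back to
// allocate-copy-deallocate. A simplified, illustrative version of that
// decision, omitting the "size difference is reasonable" condition and the
// header update that keeps the unused tail of the chunk accounted for:
#include <algorithm>
#include <cstdlib>
#include <cstring>

void *reallocateSketch(void *OldPtr, size_t OldSize, size_t UsableSize,
                       size_t NewSize) {
  if (NewSize <= UsableSize)
    return OldPtr;  // the chunk can be reused in place
  void *NewPtr = std::malloc(NewSize);  // stand-in for the backend allocation
  if (NewPtr) {
    std::memcpy(NewPtr, OldPtr, std::min(OldSize, NewSize));
    std::free(OldPtr);
  }
  return NewPtr;
}
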