#include "flags_parser.h"
#include "local_cache.h"
#include "memtag.h"
+#include "options.h"
#include "quarantine.h"
#include "report.h"
#include "secondary.h"
reportUnrecognizedFlags();
// Store some flags locally.
- Options.MayReturnNull = getFlags()->may_return_null;
- Options.FillContents =
- getFlags()->zero_contents
- ? ZeroFill
- : (getFlags()->pattern_fill_contents ? PatternOrZeroFill : NoFill);
- Options.DeallocTypeMismatch = getFlags()->dealloc_type_mismatch;
- Options.DeleteSizeMismatch = getFlags()->delete_size_mismatch;
- Options.TrackAllocationStacks = false;
- Options.UseOddEvenTags = true;
- Options.QuarantineMaxChunkSize =
+ if (getFlags()->may_return_null)
+ Primary.Options.set(OptionBit::MayReturnNull);
+ if (getFlags()->zero_contents)
+ Primary.Options.setFillContentsMode(ZeroFill);
+ else if (getFlags()->pattern_fill_contents)
+ Primary.Options.setFillContentsMode(PatternOrZeroFill);
+ if (getFlags()->dealloc_type_mismatch)
+ Primary.Options.set(OptionBit::DeallocTypeMismatch);
+ if (getFlags()->delete_size_mismatch)
+ Primary.Options.set(OptionBit::DeleteSizeMismatch);
+ Primary.Options.set(OptionBit::UseOddEvenTags);
+
+ QuarantineMaxChunkSize =
static_cast<u32>(getFlags()->quarantine_max_chunk_size);
Stats.initLinkerInitialized();
#endif
}
- uptr computeOddEvenMaskForPointerMaybe(uptr Ptr, uptr Size) {
- if (!Options.UseOddEvenTags)
+ uptr computeOddEvenMaskForPointerMaybe(Options Options, uptr Ptr, uptr Size) {
+ if (!Options.get(OptionBit::UseOddEvenTags))
return 0;
// If a chunk's tag is odd, we want the tags of the surrounding blocks to be
uptr Alignment = MinAlignment,
bool ZeroContents = false) {
initThreadMaybe();
+ Options Options = Primary.Options.load();
#ifdef GWP_ASAN_HOOKS
if (UNLIKELY(GuardedAlloc.shouldSample())) {
const FillContentsMode FillContents = ZeroContents ? ZeroFill
: TSDRegistry.getDisableMemInit()
? NoFill
- : Options.FillContents;
+ : Options.getFillContentsMode();
if (UNLIKELY(Alignment > MaxAlignment)) {
- if (Options.MayReturnNull)
+ if (Options.get(OptionBit::MayReturnNull))
return nullptr;
reportAlignmentTooBig(Alignment, MaxAlignment);
}
// Takes care of extravagantly large sizes as well as integer overflows.
static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
if (UNLIKELY(Size >= MaxAllowedMallocSize)) {
- if (Options.MayReturnNull)
+ if (Options.get(OptionBit::MayReturnNull))
return nullptr;
reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
}
FillContents);
if (UNLIKELY(!Block)) {
- if (Options.MayReturnNull)
+ if (Options.get(OptionBit::MayReturnNull))
return nullptr;
reportOutOfMemory(NeededSize);
}
//
// When memory tagging is enabled, zeroing the contents is done as part of
// setting the tag.
- if (UNLIKELY(useMemoryTagging())) {
+ if (UNLIKELY(useMemoryTagging(Options))) {
uptr PrevUserPtr;
Chunk::UnpackedHeader Header;
const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
}
} else {
const uptr OddEvenMask =
- computeOddEvenMaskForPointerMaybe(BlockUptr, BlockSize);
+ computeOddEvenMaskForPointerMaybe(Options, BlockUptr, BlockSize);
TaggedPtr = prepareTaggedChunk(Ptr, Size, OddEvenMask, BlockEnd);
}
- storeAllocationStackMaybe(Ptr);
+ storeAllocationStackMaybe(Options, Ptr);
} else if (UNLIKELY(FillContents != NoFill)) {
// This condition is not necessarily unlikely, but since memset is
// costly, we might as well mark it as such.
// the TLS destructors, ending up in initialized thread specific data never
// being destroyed properly. Any other heap operation will do a full init.
initThreadMaybe(/*MinimalInit=*/true);
+ Options Options = Primary.Options.load();
#ifdef GWP_ASAN_HOOKS
if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
if (UNLIKELY(Header.State != Chunk::State::Allocated))
reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
- if (Options.DeallocTypeMismatch) {
+ if (Options.get(OptionBit::DeallocTypeMismatch)) {
if (Header.OriginOrWasZeroed != Origin) {
// With the exception of memalign'd chunks, that can still be free'd.
if (UNLIKELY(Header.OriginOrWasZeroed != Chunk::Origin::Memalign ||
}
const uptr Size = getSize(Ptr, &Header);
- if (DeleteSize && Options.DeleteSizeMismatch) {
+ if (DeleteSize && Options.get(OptionBit::DeleteSizeMismatch)) {
if (UNLIKELY(DeleteSize != Size))
reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
}
- quarantineOrDeallocateChunk(Ptr, &Header, Size);
+ quarantineOrDeallocateChunk(Options, Ptr, &Header, Size);
}
void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
initThreadMaybe();
+ Options Options = Primary.Options.load();
if (UNLIKELY(NewSize >= MaxAllowedMallocSize)) {
- if (Options.MayReturnNull)
+ if (Options.get(OptionBit::MayReturnNull))
return nullptr;
reportAllocationSizeTooBig(NewSize, 0, MaxAllowedMallocSize);
}
// Pointer has to be allocated with a malloc-type function. Some
// applications think that it is OK to realloc a memalign'ed pointer, which
// will trigger this check. It really isn't.
- if (Options.DeallocTypeMismatch) {
+ if (Options.get(OptionBit::DeallocTypeMismatch)) {
if (UNLIKELY(OldHeader.OriginOrWasZeroed != Chunk::Origin::Malloc))
reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
OldHeader.OriginOrWasZeroed,
: BlockEnd - (reinterpret_cast<uptr>(OldPtr) + NewSize)) &
Chunk::SizeOrUnusedBytesMask;
Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
- if (UNLIKELY(ClassId && useMemoryTagging())) {
+ if (UNLIKELY(ClassId && useMemoryTagging(Options))) {
resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
BlockEnd);
- storeAllocationStackMaybe(OldPtr);
+ storeAllocationStackMaybe(Options, OldPtr);
}
return OldTaggedPtr;
}
if (NewPtr) {
const uptr OldSize = getSize(OldPtr, &OldHeader);
memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
- quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
+ quarantineOrDeallocateChunk(Options, OldPtr, &OldHeader, OldSize);
}
return NewPtr;
}
if (getChunkFromBlock(Block, &Chunk, &Header) &&
Header.State == Chunk::State::Allocated) {
uptr TaggedChunk = Chunk;
- if (useMemoryTagging())
+ if (useMemoryTagging(Primary.Options.load()))
TaggedChunk = loadTag(Chunk);
Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
Arg);
bool canReturnNull() {
initThreadMaybe();
- return Options.MayReturnNull;
+ // The flag now lives in the Primary-owned options word; .load() takes a
+ // snapshot (presumably atomic -- confirm the Options storage type).
+ return Primary.Options.load().get(OptionBit::MayReturnNull);
}
bool setOption(Option O, sptr Value) {
// any particular chunk is cut in half. Therefore we use this tuning
// setting to control whether odd/even tags are enabled.
if (Value == M_MEMTAG_TUNING_BUFFER_OVERFLOW)
- Options.UseOddEvenTags = true;
+ Primary.Options.set(OptionBit::UseOddEvenTags);
else if (Value == M_MEMTAG_TUNING_UAF)
- Options.UseOddEvenTags = false;
+ Primary.Options.clear(OptionBit::UseOddEvenTags);
return true;
} else {
// We leave it to the various sub-components to decide whether or not they
Header.State == Chunk::State::Allocated;
}
- bool useMemoryTagging() { return Primary.useMemoryTagging(); }
+ // Convenience overload: samples the current Primary-owned options and
+ // forwards to the static overload below.
+ bool useMemoryTagging() const {
+ return useMemoryTagging(Primary.Options.load());
+ }
+ // Static overload taking an already-loaded Options snapshot so callers on
+ // hot paths can reuse a single load() across several option queries.
+ static bool useMemoryTagging(Options Options) {
+ return PrimaryT::useMemoryTagging(Options);
+ }
+ // Forwards to the primary allocator, which owns the tagging state.
void disableMemoryTagging() { Primary.disableMemoryTagging(); }
void setTrackAllocationStacks(bool Track) {
initThreadMaybe();
- Options.TrackAllocationStacks = Track;
+ // The former bitfield assignment becomes an explicit set/clear of the
+ // TrackAllocationStacks bit in the shared options word.
+ if (Track)
+ Primary.Options.set(OptionBit::TrackAllocationStacks);
+ else
+ Primary.Options.clear(OptionBit::TrackAllocationStacks);
}
void setFillContents(FillContentsMode FillContents) {
initThreadMaybe();
- Options.FillContents = FillContents;
+ // Stores the fill mode in the Primary-owned options word instead of the
+ // removed local Options bitfield.
+ Primary.Options.setFillContentsMode(FillContents);
}
const char *getStackDepotAddress() const {
static const uptr MaxTraceSize = 64;
u32 Cookie;
-
- struct {
- u8 MayReturnNull : 1; // may_return_null
- FillContentsMode FillContents : 2; // zero_contents, pattern_fill_contents
- u8 DeallocTypeMismatch : 1; // dealloc_type_mismatch
- u8 DeleteSizeMismatch : 1; // delete_size_mismatch
- u8 TrackAllocationStacks : 1;
- u8 UseOddEvenTags : 1;
- u32 QuarantineMaxChunkSize; // quarantine_max_chunk_size
- } Options;
+ u32 QuarantineMaxChunkSize;
GlobalStats Stats;
PrimaryT Primary;
reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
}
- void quarantineOrDeallocateChunk(void *Ptr, Chunk::UnpackedHeader *Header,
- uptr Size) {
+ void quarantineOrDeallocateChunk(Options Options, void *Ptr,
+ Chunk::UnpackedHeader *Header, uptr Size) {
Chunk::UnpackedHeader NewHeader = *Header;
- if (UNLIKELY(NewHeader.ClassId && useMemoryTagging())) {
+ if (UNLIKELY(NewHeader.ClassId && useMemoryTagging(Options))) {
u8 PrevTag = extractTag(loadTag(reinterpret_cast<uptr>(Ptr)));
if (!TSDRegistry.getDisableMemInit()) {
uptr TaggedBegin, TaggedEnd;
const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
- reinterpret_cast<uptr>(getBlockBegin(Ptr, &NewHeader)),
+ Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, &NewHeader)),
SizeClassMap::getSizeByClassId(NewHeader.ClassId));
// Exclude the previous tag so that immediate use after free is detected
// 100% of the time.
&TaggedEnd);
}
NewHeader.OriginOrWasZeroed = !TSDRegistry.getDisableMemInit();
- storeDeallocationStackMaybe(Ptr, PrevTag);
+ storeDeallocationStackMaybe(Options, Ptr, PrevTag);
}
// If the quarantine is disabled, if the actual size of the chunk is 0, or
// if the size is larger than the maximum allowed, we return the chunk
// directly to the backend.
// Logical Or can be short-circuited, which introduces unnecessary
// conditional jumps, so use bitwise Or and let the compiler be clever.
- const bool BypassQuarantine = !Quarantine.getCacheSize() | !Size |
- (Size > Options.QuarantineMaxChunkSize);
+ const bool BypassQuarantine =
+ !Quarantine.getCacheSize() | !Size | (Size > QuarantineMaxChunkSize);
if (BypassQuarantine) {
NewHeader.State = Chunk::State::Available;
Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
return Offset + Chunk::getHeaderSize();
}
- void storeAllocationStackMaybe(void *Ptr) {
- if (!UNLIKELY(Options.TrackAllocationStacks))
+ // When stack tracking is enabled, stores the collectStackTrace() result and
+ // the thread ID into u32 slots of the chunk (at MemTagAllocationTraceIndex /
+ // MemTagAllocationTidIndex). Now takes the Options snapshot as a parameter
+ // instead of reading the removed member bitfield.
+ void storeAllocationStackMaybe(Options Options, void *Ptr) {
+ if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
return;
auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
Ptr32[MemTagAllocationTraceIndex] = collectStackTrace();
Ptr32[MemTagAllocationTidIndex] = getThreadID();
}
- void storeDeallocationStackMaybe(void *Ptr, uint8_t PrevTag) {
- if (!UNLIKELY(Options.TrackAllocationStacks))
+ void storeDeallocationStackMaybe(Options Options, void *Ptr,
+ uint8_t PrevTag) {
+ if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
return;
// Disable tag checks here so that we don't need to worry about zero sized