list(APPEND SCUDO_CFLAGS -O3)
endif()
+append_list_if(COMPILER_RT_HAS_WTHREAD_SAFETY_FLAG -Werror=thread-safety
+ SCUDO_CFLAGS)
+
set(SCUDO_LINK_FLAGS)
list(APPEND SCUDO_LINK_FLAGS -Wl,-z,defs,-z,now,-z,relro)
NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
uptr Alignment = MinAlignment,
- bool ZeroContents = false) {
+ bool ZeroContents = false) NO_THREAD_SAFETY_ANALYSIS {
initThreadMaybe();
const Options Options = Primary.Options.load();
if (UnlockRequired)
TSD->unlock();
}
- if (UNLIKELY(ClassId == 0))
+ if (UNLIKELY(ClassId == 0)) {
Block = Secondary.allocate(Options, Size, Alignment, &SecondaryBlockEnd,
FillContents);
+ }
if (UNLIKELY(!Block)) {
if (Options.get(OptionBit::MayReturnNull))
// TODO(kostyak): disable() is currently best-effort. There are some small
// windows of time when an allocation could still succeed after
// this function finishes. We will revisit that later.
- void disable() {
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
initThreadMaybe();
#ifdef GWP_ASAN_HOOKS
GuardedAlloc.disable();
Secondary.disable();
}
- void enable() {
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
initThreadMaybe();
Secondary.enable();
Primary.enable();
}
void quarantineOrDeallocateChunk(Options Options, void *TaggedPtr,
- Chunk::UnpackedHeader *Header, uptr Size) {
+ Chunk::UnpackedHeader *Header,
+ uptr Size) NO_THREAD_SAFETY_ANALYSIS {
void *Ptr = getHeaderTaggedPointer(TaggedPtr);
Chunk::UnpackedHeader NewHeader = *Header;
// If the quarantine is disabled, the actual size of a chunk is 0 or larger
sync_mutex_unlock(&M);
}
+void HybridMutex::assertHeldImpl() __TA_NO_THREAD_SAFETY_ANALYSIS {}
+
u64 getMonotonicTime() { return _zx_clock_get_monotonic(); }
u32 getNumberOfCPUs() { return _zx_system_get_num_cpus(); }
#if SCUDO_LINUX
#include "common.h"
+#include "internal_defs.h"
#include "linux.h"
#include "mutex.h"
#include "string_utils.h"
}
}
+void HybridMutex::assertHeldImpl() {
+ CHECK(atomic_load(&M, memory_order_acquire) != Unlocked);
+}
+
u64 getMonotonicTime() {
timespec TS;
clock_gettime(CLOCK_MONOTONIC, &TS);
#include "atomic_helpers.h"
#include "common.h"
+#include "thread_annotations.h"
#include <string.h>
namespace scudo {
-class HybridMutex {
+class CAPABILITY("mutex") HybridMutex {
public:
- bool tryLock();
- NOINLINE void lock() {
+ bool tryLock() TRY_ACQUIRE(true);
+ NOINLINE void lock() ACQUIRE() {
if (LIKELY(tryLock()))
return;
// The compiler may try to fully unroll the loop, ending up in a
}
lockSlow();
}
- void unlock();
+ void unlock() RELEASE();
+
+  // TODO(chiahungduan): In general, we may want to assert the owner of the
+  // lock as well. Given the current uses of HybridMutex, it's acceptable
+  // without asserting the owner. Re-evaluate this when we have scenarios
+  // that require more fine-grained locking.
+ ALWAYS_INLINE void assertHeld() ASSERT_CAPABILITY(this) {
+ if (SCUDO_DEBUG)
+ assertHeldImpl();
+ }
private:
+ void assertHeldImpl();
+
static constexpr u8 NumberOfTries = 8U;
static constexpr u8 NumberOfYields = 8U;
sync_mutex_t M = {};
#endif
- void lockSlow();
+ void lockSlow() ACQUIRE();
};
-class ScopedLock {
+class SCOPED_CAPABILITY ScopedLock {
public:
- explicit ScopedLock(HybridMutex &M) : Mutex(M) { Mutex.lock(); }
- ~ScopedLock() { Mutex.unlock(); }
+ explicit ScopedLock(HybridMutex &M) ACQUIRE(M) : Mutex(M) { Mutex.lock(); }
+ ~ScopedLock() RELEASE() { Mutex.unlock(); }
private:
HybridMutex &Mutex;
#include "report.h"
#include "stats.h"
#include "string_utils.h"
+#include "thread_annotations.h"
namespace scudo {
static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
- void init(s32 ReleaseToOsInterval) {
+ void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
if (SCUDO_FUCHSIA)
reportError("SizeClassAllocator32 is not supported on Fuchsia");
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
}
- void unmapTestOnly() {
+ void unmapTestOnly() NO_THREAD_SAFETY_ANALYSIS {
while (NumberOfStashedRegions > 0)
unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
RegionSize);
DCHECK_LT(ClassId, NumClasses);
SizeClassInfo *Sci = getSizeClassInfo(ClassId);
ScopedLock L(Sci->Mutex);
- TransferBatch *B = popBatchImpl(C, ClassId);
+ TransferBatch *B = popBatchImpl(C, ClassId, Sci);
if (UNLIKELY(!B)) {
if (UNLIKELY(!populateFreeList(C, ClassId, Sci)))
return nullptr;
- B = popBatchImpl(C, ClassId);
+ B = popBatchImpl(C, ClassId, Sci);
      // If `populateFreeList` succeeded, we are supposed to get free blocks.
DCHECK_NE(B, nullptr);
}
// the blocks.
if (Size == 1 && !populateFreeList(C, ClassId, Sci))
return;
- pushBlocksImpl(C, ClassId, Array, Size);
+ pushBlocksImpl(C, ClassId, Sci, Array, Size);
Sci->Stats.PushedBlocks += Size;
return;
}
}
ScopedLock L(Sci->Mutex);
- pushBlocksImpl(C, ClassId, Array, Size, SameGroup);
+ pushBlocksImpl(C, ClassId, Sci, Array, Size, SameGroup);
Sci->Stats.PushedBlocks += Size;
if (ClassId != SizeClassMap::BatchClassId)
releaseToOSMaybe(Sci, ClassId);
}
- void disable() {
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
// The BatchClassId must be locked last since other classes can use it.
for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
PossibleRegions.disable();
}
- void enable() {
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
PossibleRegions.enable();
RegionsStashMutex.unlock();
getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
}
}
- template <typename F> void iterateOverBlocks(F Callback) {
+ template <typename F>
+ void iterateOverBlocks(F Callback) NO_THREAD_SAFETY_ANALYSIS {
uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
for (uptr I = 0; I < NumClasses; I++) {
SizeClassInfo *Sci = getSizeClassInfo(I);
for (uptr I = 0; I < NumClasses; I++) {
SizeClassInfo *Sci = getSizeClassInfo(I);
ScopedLock L(Sci->Mutex);
- getStats(Str, I, 0);
+ getStats(Str, I, Sci, 0);
}
}
struct alignas(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
HybridMutex Mutex;
- SinglyLinkedList<BatchGroup> FreeList;
- uptr CurrentRegion;
- uptr CurrentRegionAllocated;
- SizeClassStats Stats;
+ SinglyLinkedList<BatchGroup> FreeList GUARDED_BY(Mutex);
+ uptr CurrentRegion GUARDED_BY(Mutex);
+ uptr CurrentRegionAllocated GUARDED_BY(Mutex);
+ SizeClassStats Stats GUARDED_BY(Mutex);
u32 RandState;
- uptr AllocatedUser;
+ uptr AllocatedUser GUARDED_BY(Mutex);
// Lowest & highest region index allocated for this size class, to avoid
// looping through the whole NumRegions.
- uptr MinRegionIndex;
- uptr MaxRegionIndex;
- ReleaseToOsInfo ReleaseInfo;
+ uptr MinRegionIndex GUARDED_BY(Mutex);
+ uptr MaxRegionIndex GUARDED_BY(Mutex);
+ ReleaseToOsInfo ReleaseInfo GUARDED_BY(Mutex);
};
static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
return Region;
}
- uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) {
+ uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) REQUIRES(Sci->Mutex) {
DCHECK_LT(ClassId, NumClasses);
uptr Region = 0;
{
// `SameGroup=true` instead.
//
// The region mutex needs to be held while calling this method.
- void pushBlocksImpl(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size,
- bool SameGroup = false) {
+ void pushBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
+ CompactPtrT *Array, u32 Size, bool SameGroup = false)
+ REQUIRES(Sci->Mutex) {
DCHECK_GT(Size, 0U);
- SizeClassInfo *Sci = getSizeClassInfo(ClassId);
auto CreateGroup = [&](uptr GroupId) {
BatchGroup *BG = nullptr;
// group id will be considered first.
//
// The region mutex needs to be held while calling this method.
- TransferBatch *popBatchImpl(CacheT *C, uptr ClassId) {
- SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+ TransferBatch *popBatchImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
+ REQUIRES(Sci->Mutex) {
if (Sci->FreeList.empty())
return nullptr;
return B;
}
- NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, SizeClassInfo *Sci) {
+ NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
+ REQUIRES(Sci->Mutex) {
uptr Region;
uptr Offset;
// If the size-class currently has a region associated to it, use it. The
// it only happens when it crosses the group size boundary. Instead of
    // sorting them, treat them as the same group here to avoid sorting the
// almost-sorted blocks.
- pushBlocksImpl(C, ClassId, &ShuffleArray[I], N, /*SameGroup=*/true);
+ pushBlocksImpl(C, ClassId, Sci, &ShuffleArray[I], N, /*SameGroup=*/true);
I += N;
}
return true;
}
- void getStats(ScopedString *Str, uptr ClassId, uptr Rss) {
- SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+ void getStats(ScopedString *Str, uptr ClassId, SizeClassInfo *Sci, uptr Rss)
+ REQUIRES(Sci->Mutex) {
if (Sci->AllocatedUser == 0)
return;
const uptr InUse = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
}
NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
- bool Force = false) {
+ bool Force = false) REQUIRES(Sci->Mutex) {
const uptr BlockSize = getSizeByClassId(ClassId);
const uptr PageSize = getPageSizeCached();
SizeClassInfo SizeClassInfoArray[NumClasses] = {};
// Track the regions in use, 0 is unused, otherwise store ClassId + 1.
+ // FIXME: There is no dedicated lock for `PossibleRegions`.
ByteMap PossibleRegions = {};
atomic_s32 ReleaseToOsIntervalMs = {};
// Unless several threads request regions simultaneously from different size
// classes, the stash rarely contains more than 1 entry.
static constexpr uptr MaxStashedRegions = 4;
HybridMutex RegionsStashMutex;
- uptr NumberOfStashedRegions = 0;
- uptr RegionsStash[MaxStashedRegions] = {};
+ uptr NumberOfStashedRegions GUARDED_BY(RegionsStashMutex) = 0;
+ uptr RegionsStash[MaxStashedRegions] GUARDED_BY(RegionsStashMutex) = {};
};
} // namespace scudo
#include "release.h"
#include "stats.h"
#include "string_utils.h"
+#include "thread_annotations.h"
namespace scudo {
static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
- void init(s32 ReleaseToOsInterval) {
+ void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
DCHECK_EQ(PrimaryBase, 0U);
// Reserve the space required for the Primary.
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
}
- void unmapTestOnly() {
+ void unmapTestOnly() NO_THREAD_SAFETY_ANALYSIS {
for (uptr I = 0; I < NumClasses; I++) {
RegionInfo *Region = getRegionInfo(I);
*Region = {};
bool PrintStats = false;
{
ScopedLock L(Region->Mutex);
- TransferBatch *B = popBatchImpl(C, ClassId);
+ TransferBatch *B = popBatchImpl(C, ClassId, Region);
if (LIKELY(B)) {
Region->Stats.PoppedBlocks += B->getCount();
return B;
!populateFreeList(C, ClassId, Region))) {
PrintStats = !RegionIsExhausted && Region->Exhausted;
} else {
- B = popBatchImpl(C, ClassId);
+ B = popBatchImpl(C, ClassId, Region);
        // If `populateFreeList` succeeded, we are supposed to get free blocks.
DCHECK_NE(B, nullptr);
Region->Stats.PoppedBlocks += B->getCount();
// be less than two. Therefore, populate the free list before inserting
// the blocks.
if (Size >= 2U) {
- pushBlocksImpl(C, SizeClassMap::BatchClassId, Array, Size);
+ pushBlocksImpl(C, SizeClassMap::BatchClassId, Region, Array, Size);
Region->Stats.PushedBlocks += Size;
} else {
const bool RegionIsExhausted = Region->Exhausted;
}
ScopedLock L(Region->Mutex);
- pushBlocksImpl(C, ClassId, Array, Size, SameGroup);
+ pushBlocksImpl(C, ClassId, Region, Array, Size, SameGroup);
Region->Stats.PushedBlocks += Size;
if (ClassId != SizeClassMap::BatchClassId)
releaseToOSMaybe(Region, ClassId);
}
- void disable() {
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
// The BatchClassId must be locked last since other classes can use it.
for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
getRegionInfo(SizeClassMap::BatchClassId)->Mutex.lock();
}
- void enable() {
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
getRegionInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
for (uptr I = 0; I < NumClasses; I++) {
if (I == SizeClassMap::BatchClassId)
}
}
- template <typename F> void iterateOverBlocks(F Callback) {
+ template <typename F>
+ void iterateOverBlocks(F Callback) NO_THREAD_SAFETY_ANALYSIS {
for (uptr I = 0; I < NumClasses; I++) {
if (I == SizeClassMap::BatchClassId)
continue;
- const RegionInfo *Region = getRegionInfo(I);
+ RegionInfo *Region = getRegionInfo(I);
const uptr BlockSize = getSizeByClassId(I);
const uptr From = Region->RegionBeg;
const uptr To = From + Region->AllocatedUser;
for (uptr I = 0; I < NumClasses; I++) {
RegionInfo *Region = getRegionInfo(I);
ScopedLock L(Region->Mutex);
- getStats(Str, I, 0);
+ getStats(Str, I, Region, 0);
}
}
decompactPtrInternal(getCompactPtrBaseByClassId(ClassId), CompactPtr));
}
- static BlockInfo findNearestBlock(const char *RegionInfoData, uptr Ptr) {
+ static BlockInfo findNearestBlock(const char *RegionInfoData,
+ uptr Ptr) NO_THREAD_SAFETY_ANALYSIS {
const RegionInfo *RegionInfoArray =
reinterpret_cast<const RegionInfo *>(RegionInfoData);
+
uptr ClassId;
uptr MinDistance = -1UL;
for (uptr I = 0; I != NumClasses; ++I) {
if (I == SizeClassMap::BatchClassId)
continue;
uptr Begin = RegionInfoArray[I].RegionBeg;
+      // TODO(chiahungduan): In fact, we need to lock RegionInfo::Mutex.
+      // However, RegionInfoData is passed with a const qualifier, and locking
+      // the mutex requires modifying RegionInfoData, which means we would
+      // have to cast away the const qualifier. That may lead to another
+      // undefined behavior (the first one is accessing `AllocatedUser`
+      // without locking). It's better to pass `RegionInfoData` as `void *`
+      // so that we can lock the mutex properly.
uptr End = Begin + RegionInfoArray[I].AllocatedUser;
if (Begin > End || End - Begin < SizeClassMap::getSizeByClassId(I))
continue;
struct UnpaddedRegionInfo {
HybridMutex Mutex;
- SinglyLinkedList<BatchGroup> FreeList;
+ SinglyLinkedList<BatchGroup> FreeList GUARDED_BY(Mutex);
+ // This is initialized before thread creation.
uptr RegionBeg = 0;
- RegionStats Stats = {};
- u32 RandState = 0;
- uptr MappedUser = 0; // Bytes mapped for user memory.
- uptr AllocatedUser = 0; // Bytes allocated for user memory.
- MapPlatformData Data = {};
- ReleaseToOsInfo ReleaseInfo = {};
- bool Exhausted = false;
+ RegionStats Stats GUARDED_BY(Mutex) = {};
+ u32 RandState GUARDED_BY(Mutex) = 0;
+ // Bytes mapped for user memory.
+ uptr MappedUser GUARDED_BY(Mutex) = 0;
+ // Bytes allocated for user memory.
+ uptr AllocatedUser GUARDED_BY(Mutex) = 0;
+ MapPlatformData Data GUARDED_BY(Mutex) = {};
+ ReleaseToOsInfo ReleaseInfo GUARDED_BY(Mutex) = {};
+ bool Exhausted GUARDED_BY(Mutex) = false;
};
struct RegionInfo : UnpaddedRegionInfo {
char Padding[SCUDO_CACHE_LINE_SIZE -
// `SameGroup=true` instead.
//
// The region mutex needs to be held while calling this method.
- void pushBlocksImpl(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size,
- bool SameGroup = false) {
+ void pushBlocksImpl(CacheT *C, uptr ClassId, RegionInfo *Region,
+ CompactPtrT *Array, u32 Size, bool SameGroup = false)
+ REQUIRES(Region->Mutex) {
DCHECK_GT(Size, 0U);
- RegionInfo *Region = getRegionInfo(ClassId);
auto CreateGroup = [&](uptr GroupId) {
BatchGroup *BG = nullptr;
// group id will be considered first.
//
// The region mutex needs to be held while calling this method.
- TransferBatch *popBatchImpl(CacheT *C, uptr ClassId) {
- RegionInfo *Region = getRegionInfo(ClassId);
+ TransferBatch *popBatchImpl(CacheT *C, uptr ClassId, RegionInfo *Region)
+ REQUIRES(Region->Mutex) {
if (Region->FreeList.empty())
return nullptr;
return B;
}
- NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, RegionInfo *Region) {
+ NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, RegionInfo *Region)
+ REQUIRES(Region->Mutex) {
const uptr Size = getSizeByClassId(ClassId);
const u16 MaxCount = TransferBatch::getMaxCached(Size);
// it only happens when it crosses the group size boundary. Instead of
    // sorting them, treat them as the same group here to avoid sorting the
// almost-sorted blocks.
- pushBlocksImpl(C, ClassId, &ShuffleArray[I], N, /*SameGroup=*/true);
+ pushBlocksImpl(C, ClassId, Region, &ShuffleArray[I], N,
+ /*SameGroup=*/true);
I += N;
}
return true;
}
- void getStats(ScopedString *Str, uptr ClassId, uptr Rss) {
- RegionInfo *Region = getRegionInfo(ClassId);
+ void getStats(ScopedString *Str, uptr ClassId, RegionInfo *Region, uptr Rss)
+ REQUIRES(Region->Mutex) {
if (Region->MappedUser == 0)
return;
const uptr InUse = Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks;
}
NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
- bool Force = false) {
+ bool Force = false) REQUIRES(Region->Mutex) {
const uptr BlockSize = getSizeByClassId(ClassId);
const uptr PageSize = getPageSizeCached();
#include "list.h"
#include "mutex.h"
#include "string_utils.h"
+#include "thread_annotations.h"
namespace scudo {
typedef QuarantineCache<Callback> CacheT;
using ThisT = GlobalQuarantine<Callback, Node>;
- void init(uptr Size, uptr CacheSize) {
+ void init(uptr Size, uptr CacheSize) NO_THREAD_SAFETY_ANALYSIS {
DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
DCHECK_EQ(atomic_load_relaxed(&MaxSize), 0U);
DCHECK_EQ(atomic_load_relaxed(&MinSize), 0U);
drain(C, Cb);
}
- void NOINLINE drain(CacheT *C, Callback Cb) {
+ void NOINLINE drain(CacheT *C, Callback Cb) EXCLUDES(CacheMutex) {
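+    // Snapshot the recycle decision while CacheMutex is held: `Cache` is
+    // GUARDED_BY(CacheMutex) and must not be read once the scoped lock is
+    // released.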
+    bool NeedRecycle = false;
{
ScopedLock L(CacheMutex);
Cache.transfer(C);
+      NeedRecycle = Cache.getSize() > getMaxSize();
}
- if (Cache.getSize() > getMaxSize() && RecycleMutex.tryLock())
+
+    if (NeedRecycle && RecycleMutex.tryLock())
recycle(atomic_load_relaxed(&MinSize), Cb);
}
- void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) {
+ void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) EXCLUDES(CacheMutex) {
{
ScopedLock L(CacheMutex);
Cache.transfer(C);
recycle(0, Cb);
}
- void getStats(ScopedString *Str) {
+ void getStats(ScopedString *Str) EXCLUDES(CacheMutex) {
ScopedLock L(CacheMutex);
// It assumes that the world is stopped, just as the allocator's printStats.
Cache.getStats(Str);
getMaxSize() >> 10, getCacheSize() >> 10);
}
- void disable() {
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
    // RecycleMutex must be locked first since we grab CacheMutex within recycle.
RecycleMutex.lock();
CacheMutex.lock();
}
- void enable() {
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
CacheMutex.unlock();
RecycleMutex.unlock();
}
private:
// Read-only data.
alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
- CacheT Cache;
+ CacheT Cache GUARDED_BY(CacheMutex);
alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecycleMutex;
atomic_uptr MinSize = {};
atomic_uptr MaxSize = {};
alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize = {};
- void NOINLINE recycle(uptr MinSize, Callback Cb) {
+ void NOINLINE recycle(uptr MinSize, Callback Cb) RELEASE(RecycleMutex)
+ EXCLUDES(CacheMutex) {
CacheT Tmp;
Tmp.init();
{
#include "common.h"
#include "list.h"
#include "mutex.h"
+#include "thread_annotations.h"
namespace scudo {
Buffer = nullptr;
}
- void reset(uptr NumberOfRegion, uptr CountersPerRegion, uptr MaxValue) {
+  // The lock of `StaticBuffer` is acquired conditionally and there's no easy
+  // way to specify the thread-safety attribute properly in the current code
+  // structure. Besides, it's the only place we may want to check thread
+  // safety. Therefore, it's fine to bypass the thread-safety analysis for now.
+ void reset(uptr NumberOfRegion, uptr CountersPerRegion,
+ uptr MaxValue) NO_THREAD_SAFETY_ANALYSIS {
DCHECK_GT(NumberOfRegion, 0);
DCHECK_GT(CountersPerRegion, 0);
DCHECK_GT(MaxValue, 0);
[[no_unique_address]] MapPlatformData MapData = {};
static HybridMutex Mutex;
- static uptr StaticBuffer[StaticBufferCount];
+ static uptr StaticBuffer[StaticBufferCount] GUARDED_BY(Mutex);
};
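// A minimal sketch (hypothetical `UseStatic` / `allocateDynamic` names) of the
// conditional-locking pattern described in the comment on reset() above; the
// analysis cannot prove that `StaticBuffer` accesses are protected when the
// lock is taken on only one branch, hence NO_THREAD_SAFETY_ANALYSIS:
//
//   void use(bool UseStatic) NO_THREAD_SAFETY_ANALYSIS {
//     uptr *Buf = nullptr;
//     if (UseStatic) {
//       Mutex.lock();            // Conditionally acquired.
//       Buf = StaticBuffer;
//     } else {
//       Buf = allocateDynamic(); // Private buffer, no lock needed.
//     }
//     // ... fill Buf ...
//     if (UseStatic)
//       Mutex.unlock();          // Conditionally released.
//   }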
template <class ReleaseRecorderT> class FreePagesRangeTracker {
#include "options.h"
#include "stats.h"
#include "string_utils.h"
+#include "thread_annotations.h"
namespace scudo {
Config::SecondaryCacheEntriesArraySize,
"");
- void init(s32 ReleaseToOsInterval) {
+ void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
DCHECK_EQ(EntriesCount, 0U);
setOption(Option::MaxCacheEntriesCount,
static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntriesCount));
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
}
- void store(Options Options, LargeBlock::Header *H) {
+ void store(Options Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
if (!canCache(H->CommitSize))
return unmap(H);
}
bool retrieve(Options Options, uptr Size, uptr Alignment,
- LargeBlock::Header **H, bool *Zeroed) {
+ LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
const uptr PageSize = getPageSizeCached();
const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
bool Found = false;
if (HeaderPos > CommitBase + CommitSize)
continue;
if (HeaderPos < CommitBase ||
- AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
+ AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
continue;
+ }
Found = true;
Entry = Entries[I];
Entries[I].CommitBase = 0;
(*H)->MapBase = Entry.MapBase;
(*H)->MapSize = Entry.MapSize;
(*H)->Data = Entry.Data;
+
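+    // `EntriesCount` is guarded by `Mutex`, so take the lock for the
+    // decrement.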
+ ScopedLock L(Mutex);
EntriesCount--;
}
return Found;
void releaseToOS() { releaseOlderThan(UINT64_MAX); }
- void disableMemoryTagging() {
+ void disableMemoryTagging() EXCLUDES(Mutex) {
ScopedLock L(Mutex);
for (u32 I = 0; I != Config::SecondaryCacheQuarantineSize; ++I) {
if (Quarantine[I].CommitBase) {
QuarantinePos = -1U;
}
- void disable() { Mutex.lock(); }
+ void disable() NO_THREAD_SAFETY_ANALYSIS { Mutex.lock(); }
- void enable() { Mutex.unlock(); }
+ void enable() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }
void unmapTestOnly() { empty(); }
u64 Time;
};
- void releaseIfOlderThan(CachedBlock &Entry, u64 Time) {
+ void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
if (!Entry.CommitBase || !Entry.Time)
return;
if (Entry.Time > Time) {
Entry.Time = 0;
}
- void releaseOlderThan(u64 Time) {
+ void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
ScopedLock L(Mutex);
if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
return;
}
HybridMutex Mutex;
- u32 EntriesCount = 0;
- u32 QuarantinePos = 0;
+ u32 EntriesCount GUARDED_BY(Mutex) = 0;
+ u32 QuarantinePos GUARDED_BY(Mutex) = 0;
atomic_u32 MaxEntriesCount = {};
atomic_uptr MaxEntrySize = {};
- u64 OldestTime = 0;
- u32 IsFullEvents = 0;
+ u64 OldestTime GUARDED_BY(Mutex) = 0;
+ u32 IsFullEvents GUARDED_BY(Mutex) = 0;
atomic_s32 ReleaseToOsIntervalMs = {};
- CachedBlock Entries[Config::SecondaryCacheEntriesArraySize] = {};
+ CachedBlock
+ Entries[Config::SecondaryCacheEntriesArraySize] GUARDED_BY(Mutex) = {};
NonZeroLengthArray<CachedBlock, Config::SecondaryCacheQuarantineSize>
- Quarantine = {};
+ Quarantine GUARDED_BY(Mutex) = {};
};
template <typename Config> class MapAllocator {
public:
- void init(GlobalStats *S, s32 ReleaseToOsInterval = -1) {
+ void init(GlobalStats *S,
+ s32 ReleaseToOsInterval = -1) NO_THREAD_SAFETY_ANALYSIS {
DCHECK_EQ(AllocatedBytes, 0U);
DCHECK_EQ(FreedBytes, 0U);
Cache.init(ReleaseToOsInterval);
void getStats(ScopedString *Str);
- void disable() {
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
Mutex.lock();
Cache.disable();
}
- void enable() {
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
Cache.enable();
Mutex.unlock();
}
- template <typename F> void iterateOverBlocks(F Callback) const {
+ template <typename F>
+ void iterateOverBlocks(F Callback) const NO_THREAD_SAFETY_ANALYSIS {
for (const auto &H : InUseBlocks) {
uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
if (allocatorSupportsMemoryTagging<Config>())
private:
typename Config::SecondaryCache Cache;
- HybridMutex Mutex;
- DoublyLinkedList<LargeBlock::Header> InUseBlocks;
- uptr AllocatedBytes = 0;
- uptr FreedBytes = 0;
- uptr LargestSize = 0;
- u32 NumberOfAllocs = 0;
- u32 NumberOfFrees = 0;
- LocalStats Stats;
+ mutable HybridMutex Mutex;
+ DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);
+ uptr AllocatedBytes GUARDED_BY(Mutex) = 0;
+ uptr FreedBytes GUARDED_BY(Mutex) = 0;
+ uptr LargestSize GUARDED_BY(Mutex) = 0;
+ u32 NumberOfAllocs GUARDED_BY(Mutex) = 0;
+ u32 NumberOfFrees GUARDED_BY(Mutex) = 0;
+ LocalStats Stats GUARDED_BY(Mutex);
};
// As with the Primary, the size passed to this function includes any desired
}
template <typename Config>
-void MapAllocator<Config>::deallocate(Options Options, void *Ptr) {
+void MapAllocator<Config>::deallocate(Options Options, void *Ptr)
+ EXCLUDES(Mutex) {
LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
const uptr CommitSize = H->CommitSize;
{
}
template <typename Config>
-void MapAllocator<Config>::getStats(ScopedString *Str) {
+void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) {
ScopedLock L(Mutex);
Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
"(%zuK), remains %u (%zuK) max %zuM\n",
#include "atomic_helpers.h"
#include "list.h"
#include "mutex.h"
+#include "thread_annotations.h"
#include <string.h>
public:
void init() { LocalStats::init(); }
- void link(LocalStats *S) {
+ void link(LocalStats *S) EXCLUDES(Mutex) {
ScopedLock L(Mutex);
StatsList.push_back(S);
}
- void unlink(LocalStats *S) {
+ void unlink(LocalStats *S) EXCLUDES(Mutex) {
ScopedLock L(Mutex);
StatsList.remove(S);
for (uptr I = 0; I < StatCount; I++)
add(static_cast<StatType>(I), S->get(static_cast<StatType>(I)));
}
- void get(uptr *S) const {
+ void get(uptr *S) const EXCLUDES(Mutex) {
ScopedLock L(Mutex);
for (uptr I = 0; I < StatCount; I++)
S[I] = LocalStats::get(static_cast<StatType>(I));
S[I] = static_cast<sptr>(S[I]) >= 0 ? S[I] : 0;
}
- void lock() { Mutex.lock(); }
- void unlock() { Mutex.unlock(); }
+ void lock() ACQUIRE(Mutex) { Mutex.lock(); }
+ void unlock() RELEASE(Mutex) { Mutex.unlock(); }
- void disable() { lock(); }
- void enable() { unlock(); }
+ void disable() ACQUIRE(Mutex) { lock(); }
+ void enable() RELEASE(Mutex) { unlock(); }
private:
mutable HybridMutex Mutex;
- DoublyLinkedList<LocalStats> StatsList;
+ DoublyLinkedList<LocalStats> StatsList GUARDED_BY(Mutex);
};
} // namespace scudo
-mno-omit-leaf-frame-pointer)
endif()
+append_list_if(COMPILER_RT_HAS_WTHREAD_SAFETY_FLAG -Werror=thread-safety
+ SCUDO_UNITTEST_CFLAGS)
+
set(SCUDO_TEST_ARCH ${SCUDO_STANDALONE_SUPPORTED_ARCH})
# gtests requires c++
EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
}
-SCUDO_TYPED_TEST(ScudoCombinedTest, CacheDrain) {
+SCUDO_TYPED_TEST(ScudoCombinedTest, CacheDrain) NO_THREAD_SAFETY_ANALYSIS {
auto *Allocator = this->Allocator.get();
std::vector<void *> V;
for (scudo::u32 I = 0; I < NumberOfThreads; I++)
pthread_join(Threads[I], 0);
}
+
+TEST(ScudoMutexTest, MutexAssertHeld) {
+ scudo::HybridMutex M;
+ M.lock();
+ M.assertHeld();
+ M.unlock();
+}
EXPECT_FALSE(Allocator->isInitialized());
auto Registry = Allocator->getTSDRegistry();
- Registry->init(Allocator.get());
+ Registry->initOnceMaybe(Allocator.get());
EXPECT_TRUE(Allocator->isInitialized());
}
--- /dev/null
+//===-- thread_annotations.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_THREAD_ANNOTATIONS_
+#define SCUDO_THREAD_ANNOTATIONS_
+
+// Enable thread safety attributes only with clang.
+// The attributes can be safely ignored when compiling with other compilers.
+#if defined(__clang__)
+#define THREAD_ANNOTATION_ATTRIBUTE_(x) __attribute__((x))
+#else
+#define THREAD_ANNOTATION_ATTRIBUTE_(x) // no-op
+#endif
+
+#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(capability(x))
+
+#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE_(scoped_lockable)
+
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE_(guarded_by(x))
+
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE_(pt_guarded_by(x))
+
+#define ACQUIRED_BEFORE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(acquired_before(__VA_ARGS__))
+
+#define ACQUIRED_AFTER(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(acquired_after(__VA_ARGS__))
+
+#define REQUIRES(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(requires_capability(__VA_ARGS__))
+
+#define REQUIRES_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(requires_shared_capability(__VA_ARGS__))
+
+#define ACQUIRE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(acquire_capability(__VA_ARGS__))
+
+#define ACQUIRE_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(acquire_shared_capability(__VA_ARGS__))
+
+#define RELEASE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(release_capability(__VA_ARGS__))
+
+#define RELEASE_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(release_shared_capability(__VA_ARGS__))
+
+#define TRY_ACQUIRE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(try_acquire_capability(__VA_ARGS__))
+
+#define TRY_ACQUIRE_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(try_acquire_shared_capability(__VA_ARGS__))
+
+#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE_(locks_excluded(__VA_ARGS__))
+
+#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(assert_capability(x))
+
+#define ASSERT_SHARED_CAPABILITY(x) \
+ THREAD_ANNOTATION_ATTRIBUTE_(assert_shared_capability(x))
+
+#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(lock_returned(x))
+
+#define NO_THREAD_SAFETY_ANALYSIS \
+ THREAD_ANNOTATION_ATTRIBUTE_(no_thread_safety_analysis)
+
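+// A minimal illustration of how these macros are meant to be used. `Counter`
+// and `increment` are hypothetical names, not part of Scudo:
+//
+//   class CAPABILITY("mutex") Mutex {
+//   public:
+//     void lock() ACQUIRE();
+//     void unlock() RELEASE();
+//   };
+//
+//   Mutex M;
+//   int Counter GUARDED_BY(M);
+//
+//   void increment() EXCLUDES(M) {
+//     M.lock();
+//     ++Counter; // OK: M is held.
+//     M.unlock();
+//   }
+//
+// When building with -Wthread-safety, Clang then flags any access to
+// `Counter` that it cannot prove happens while `M` is held.
+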
+#endif // SCUDO_THREAD_ANNOTATIONS_
void HybridMutex::unlock() {}
+void HybridMutex::assertHeldImpl() {}
+
u64 getMonotonicTime() {
timespec TS;
clock_gettime(CLOCK_MONOTONIC, &TS);
#include "atomic_helpers.h"
#include "common.h"
#include "mutex.h"
+#include "thread_annotations.h"
#include <limits.h> // for PTHREAD_DESTRUCTOR_ITERATIONS
#include <pthread.h>
namespace scudo {
template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
+  // TODO: Add thread-safety annotations on `Cache` and `QuarantineCache`.
typename Allocator::CacheT Cache;
typename Allocator::QuarantineCacheT QuarantineCache;
using ThisT = TSD<Allocator>;
u8 DestructorIterations = 0;
- void init(Allocator *Instance) {
+ void init(Allocator *Instance) NO_THREAD_SAFETY_ANALYSIS {
DCHECK_EQ(DestructorIterations, 0U);
DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
Instance->initCache(&Cache);
DestructorIterations = PTHREAD_DESTRUCTOR_ITERATIONS;
}
- void commitBack(Allocator *Instance) { Instance->commitBack(this); }
-
- inline bool tryLock() {
+ inline bool tryLock() NO_THREAD_SAFETY_ANALYSIS {
if (Mutex.tryLock()) {
atomic_store_relaxed(&Precedence, 0);
return true;
static_cast<uptr>(getMonotonicTime() >> FIRST_32_SECOND_64(16, 0)));
return false;
}
- inline void lock() {
+ inline void lock() NO_THREAD_SAFETY_ANALYSIS {
atomic_store_relaxed(&Precedence, 0);
Mutex.lock();
}
- inline void unlock() { Mutex.unlock(); }
+ inline void unlock() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }
inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
+ void commitBack(Allocator *Instance) ASSERT_CAPABILITY(Mutex) {
+ Instance->commitBack(this);
+ }
+
private:
HybridMutex Mutex;
atomic_uptr Precedence = {};
template <class Allocator> void teardownThread(void *Ptr);
template <class Allocator> struct TSDRegistryExT {
- void init(Allocator *Instance) {
+ void init(Allocator *Instance) REQUIRES(Mutex) {
DCHECK(!Initialized);
Instance->init();
CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
Initialized = true;
}
- void initOnceMaybe(Allocator *Instance) {
+ void initOnceMaybe(Allocator *Instance) EXCLUDES(Mutex) {
ScopedLock L(Mutex);
if (LIKELY(Initialized))
return;
init(Instance); // Sets Initialized.
}
- void unmapTestOnly(Allocator *Instance) {
+ void unmapTestOnly(Allocator *Instance) EXCLUDES(Mutex) {
DCHECK(Instance);
if (reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey))) {
DCHECK_EQ(reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey)),
FallbackTSD.commitBack(Instance);
FallbackTSD = {};
State = {};
+ ScopedLock L(Mutex);
Initialized = false;
}
initThread(Instance, MinimalInit);
}
- ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
+  // TODO(chiahungduan): Consider removing the argument `UnlockRequired` by
+  // embedding the logic into TSD or by always locking the TSD. That would let
+  // us mark the thread annotations properly here and add runtime assertions
+  // in the member functions of TSD, for example, asserting that the lock is
+  // held before calling TSD::commitBack().
+ ALWAYS_INLINE TSD<Allocator> *
+ getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
if (LIKELY(State.InitState == ThreadState::Initialized &&
!atomic_load(&Disabled, memory_order_acquire))) {
*UnlockRequired = false;
// To disable the exclusive TSD registry, we effectively lock the fallback TSD
// and force all threads to attempt to use it instead of their local one.
- void disable() {
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
Mutex.lock();
FallbackTSD.lock();
atomic_store(&Disabled, 1U, memory_order_release);
}
- void enable() {
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
atomic_store(&Disabled, 0U, memory_order_release);
FallbackTSD.unlock();
Mutex.unlock();
}
pthread_key_t PThreadKey = {};
- bool Initialized = false;
+ bool Initialized GUARDED_BY(Mutex) = false;
atomic_u8 Disabled = {};
TSD<Allocator> FallbackTSD;
HybridMutex Mutex;
template <class Allocator>
thread_local ThreadState TSDRegistryExT<Allocator>::State;
-template <class Allocator> void teardownThread(void *Ptr) {
+template <class Allocator>
+void teardownThread(void *Ptr) NO_THREAD_SAFETY_ANALYSIS {
typedef TSDRegistryExT<Allocator> TSDRegistryT;
Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
// The glibc POSIX thread-local-storage deallocation routine calls user
template <class Allocator, u32 TSDsArraySize, u32 DefaultTSDCount>
struct TSDRegistrySharedT {
- void init(Allocator *Instance) {
+ void init(Allocator *Instance) REQUIRES(Mutex) {
DCHECK(!Initialized);
Instance->init();
for (u32 I = 0; I < TSDsArraySize; I++)
Initialized = true;
}
- void initOnceMaybe(Allocator *Instance) {
+ void initOnceMaybe(Allocator *Instance) EXCLUDES(Mutex) {
ScopedLock L(Mutex);
if (LIKELY(Initialized))
return;
init(Instance); // Sets Initialized.
}
- void unmapTestOnly(Allocator *Instance) {
+ void unmapTestOnly(Allocator *Instance) EXCLUDES(Mutex) {
for (u32 I = 0; I < TSDsArraySize; I++) {
TSDs[I].commitBack(Instance);
TSDs[I] = {};
}
setCurrentTSD(nullptr);
+ ScopedLock L(Mutex);
Initialized = false;
}
initThread(Instance);
}
- ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
+  // `TSDs` is an array of locks, which is not supported for marking a
+  // thread-safety capability.
+ ALWAYS_INLINE TSD<Allocator> *
+ getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
TSD<Allocator> *TSD = getCurrentTSD();
DCHECK(TSD);
*UnlockRequired = true;
return getTSDAndLockSlow(TSD);
}
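  // A sketch of the limitation noted above (hypothetical signature): the
  // analysis needs lock expressions it can name statically, so a dynamically
  // indexed lock such as
  //
  //   void lockOne(u32 I) ACQUIRE(TSDs[I].Mutex); // Not analyzable.
  //
  // cannot be checked; these paths therefore opt out of the analysis.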
- void disable() {
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
Mutex.lock();
for (u32 I = 0; I < TSDsArraySize; I++)
TSDs[I].lock();
}
- void enable() {
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
for (s32 I = static_cast<s32>(TSDsArraySize - 1); I >= 0; I--)
TSDs[I].unlock();
Mutex.unlock();
return reinterpret_cast<TSD<Allocator> *>(*getTlsPtr() & ~1ULL);
}
- bool setNumberOfTSDs(u32 N) {
+ bool setNumberOfTSDs(u32 N) EXCLUDES(MutexTSDs) {
ScopedLock L(MutexTSDs);
if (N < NumberOfTSDs)
return false;
*getTlsPtr() |= B;
}
- NOINLINE void initThread(Allocator *Instance) {
+ NOINLINE void initThread(Allocator *Instance) NO_THREAD_SAFETY_ANALYSIS {
initOnceMaybe(Instance);
// Initial context assignment is done in a plain round-robin fashion.
const u32 Index = atomic_fetch_add(&CurrentIndex, 1U, memory_order_relaxed);
Instance->callPostInitCallback();
}
- NOINLINE TSD<Allocator> *getTSDAndLockSlow(TSD<Allocator> *CurrentTSD) {
+  // `TSDs` is an array of locks, which is not supported for marking a
+  // thread-safety capability.
+ NOINLINE TSD<Allocator> *getTSDAndLockSlow(TSD<Allocator> *CurrentTSD)
+ EXCLUDES(MutexTSDs) {
// Use the Precedence of the current TSD as our random seed. Since we are
// in the slow path, it means that tryLock failed, and as a result it's
// very likely that said Precedence is non-zero.
}
atomic_u32 CurrentIndex = {};
- u32 NumberOfTSDs = 0;
- u32 NumberOfCoPrimes = 0;
- u32 CoPrimes[TSDsArraySize] = {};
- bool Initialized = false;
+ u32 NumberOfTSDs GUARDED_BY(MutexTSDs) = 0;
+ u32 NumberOfCoPrimes GUARDED_BY(MutexTSDs) = 0;
+ u32 CoPrimes[TSDsArraySize] GUARDED_BY(MutexTSDs) = {};
+ bool Initialized GUARDED_BY(Mutex) = false;
HybridMutex Mutex;
HybridMutex MutexTSDs;
TSD<Allocator> TSDs[TSDsArraySize];