installAtFork();
}
-void GuardedPoolAllocator::disable() { PoolMutex.lock(); }
+void GuardedPoolAllocator::disable() {
+ PoolMutex.lock();
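+ // Also wait for any in-flight backtrace, so that no thread holds the
+ // libdl lock across the fork.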
+ BacktraceMutex.lock();
+}
-void GuardedPoolAllocator::enable() { PoolMutex.unlock(); }
+void GuardedPoolAllocator::enable() {
+ PoolMutex.unlock();
+ BacktraceMutex.unlock();
+}
void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
void *Arg) {
roundUpTo(Size, PageSize));
Meta->RecordAllocation(UserPtr, Size);
- Meta->AllocationTrace.RecordBacktrace(Backtrace);
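+ // Unwinding for the backtrace may grab the libdl lock; hold
+ // BacktraceMutex so disable() can wait for the unwind before forking.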
+ {
+ ScopedLock UL(BacktraceMutex);
+ Meta->AllocationTrace.RecordBacktrace(Backtrace);
+ }
return reinterpret_cast<void *>(UserPtr);
}
// otherwise non-reentrant unwinders may deadlock.
if (!getThreadLocals()->RecursiveGuard) {
ScopedRecursiveGuard SRG;
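+ // As with the allocation trace, serialise the unwind with fork via
+ // BacktraceMutex.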
+ ScopedLock UL(BacktraceMutex);
Meta->DeallocationTrace.RecordBacktrace(Backtrace);
}
}
// A mutex to protect the guarded slot and metadata pool for this class.
Mutex PoolMutex;
+ // Some unwinders can grab the libdl lock. In order to provide atfork
+ // protection, we must allow an unwinding thread to release the libdl lock
+ // before forking.
+ Mutex BacktraceMutex;
// Record the number of allocations that we've sampled. We store this amount so
// that we don't randomly choose to recycle a slot that previously had an
// allocation before all the slots have been utilised.