if(COMPILER_RT_TSAN_DEBUG_OUTPUT)
# Add extra debug information to TSan runtime. This configuration is rarely
# used, but we need to support it so that debug output will not bitrot.
- list(APPEND TSAN_CFLAGS -DTSAN_COLLECT_STATS=1
- -DTSAN_DEBUG_OUTPUT=2)
+ list(APPEND TSAN_CFLAGS -DTSAN_DEBUG_OUTPUT=2)
endif()
set(TSAN_RTL_CFLAGS ${TSAN_CFLAGS})
rtl/tsan_rtl_report.cpp
rtl/tsan_rtl_thread.cpp
rtl/tsan_stack_trace.cpp
- rtl/tsan_stat.cpp
rtl/tsan_suppressions.cpp
rtl/tsan_symbolize.cpp
rtl/tsan_sync.cpp
rtl/tsan_report.h
rtl/tsan_rtl.h
rtl/tsan_stack_trace.h
- rtl/tsan_stat.h
rtl/tsan_suppressions.h
rtl/tsan_symbolize.h
rtl/tsan_sync.h
..\rtl\tsan_rtl_report.cpp ^
..\rtl\tsan_rtl_thread.cpp ^
..\rtl\tsan_rtl_proc.cpp ^
- ..\rtl\tsan_stat.cpp ^
..\rtl\tsan_suppressions.cpp ^
..\rtl\tsan_sync.cpp ^
..\rtl\tsan_stack_trace.cpp ^
../rtl/tsan_rtl_thread.cpp
../rtl/tsan_rtl_proc.cpp
../rtl/tsan_stack_trace.cpp
- ../rtl/tsan_stat.cpp
../rtl/tsan_suppressions.cpp
../rtl/tsan_sync.cpp
../../sanitizer_common/sanitizer_allocator.cpp
// release-store operation by the thread with release_store_tid_ index.
// release_store_reused_ - reuse count of release_store_tid_.
-// We don't have ThreadState in these methods, so this is an ugly hack that
-// works only in C++.
-#if !SANITIZER_GO
-# define CPP_STAT_INC(typ) StatInc(cur_thread(), typ)
-#else
-# define CPP_STAT_INC(typ) (void)0
-#endif
-
namespace __tsan {
static atomic_uint32_t *ref_ptr(ClockBlock *cb) {
void ThreadClock::acquire(ClockCache *c, SyncClock *src) {
DCHECK_LE(nclk_, kMaxTid);
DCHECK_LE(src->size_, kMaxTid);
- CPP_STAT_INC(StatClockAcquire);
// Check if it's empty -> no need to do anything.
const uptr nclk = src->size_;
- if (nclk == 0) {
- CPP_STAT_INC(StatClockAcquireEmpty);
+ if (nclk == 0)
return;
- }
bool acquired = false;
for (unsigned i = 0; i < kDirtyTids; i++) {
// Check if we've already acquired src after the last release operation on src
if (tid_ >= nclk || src->elem(tid_).reused != reused_) {
// O(N) acquire.
- CPP_STAT_INC(StatClockAcquireFull);
nclk_ = max(nclk_, nclk);
u64 *dst_pos = &clk_[0];
for (ClockElem &src_elem : *src) {
}
if (acquired) {
- CPP_STAT_INC(StatClockAcquiredSomething);
last_acquire_ = clk_[tid_];
ResetCached(c);
}
sc->release_store_reused_ = 0;
if (acquired) {
- CPP_STAT_INC(StatClockAcquiredSomething);
last_acquire_ = clk_[tid_];
ResetCached(c);
}
return;
}
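
For context, the acquire operation these hunks touch reduces to an element-wise max. A minimal sketch, assuming plain tid-indexed vectors (illustrative names; TSan's real clocks add caching, sharing, and dirty tracking):

    // sketch_acquire.cpp - minimal model of the O(N) acquire path.
    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct SketchThreadClock {
      std::vector<uint64_t> clk;  // clk[t] = last observed epoch of thread t

      void Acquire(const std::vector<uint64_t> &src) {
        if (src.size() > clk.size())
          clk.resize(src.size(), 0);  // mirrors nclk_ = max(nclk_, nclk)
        for (size_t i = 0; i < src.size(); i++)
          clk[i] = std::max(clk[i], src[i]);  // observe all that src has seen
      }
    };
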
- CPP_STAT_INC(StatClockRelease);
// Check if we need to resize dst.
if (dst->size_ < nclk_)
dst->Resize(c, nclk_);
}
// O(N) release.
- CPP_STAT_INC(StatClockReleaseFull);
dst->Unshare(c);
// First, remember whether we've acquired dst.
bool acquired = IsAlreadyAcquired(dst);
- if (acquired)
- CPP_STAT_INC(StatClockReleaseAcquired);
// Update dst->clk_.
dst->FlushDirty();
uptr i = 0;
i++;
}
// Clear 'acquired' flag in the remaining elements.
- if (nclk_ < dst->size_)
- CPP_STAT_INC(StatClockReleaseClearTail);
dst->release_store_tid_ = kInvalidTid;
dst->release_store_reused_ = 0;
// If we've acquired dst, remember this fact,
void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) {
DCHECK_LE(nclk_, kMaxTid);
DCHECK_LE(dst->size_, kMaxTid);
- CPP_STAT_INC(StatClockStore);
if (dst->size_ == 0 && cached_idx_ != 0) {
// Reuse the cached clock.
if (dst->release_store_tid_ == tid_ &&
dst->release_store_reused_ == reused_ &&
!HasAcquiredAfterRelease(dst)) {
- CPP_STAT_INC(StatClockStoreFast);
UpdateCurrentThread(c, dst);
return;
}
// O(N) release-store.
- CPP_STAT_INC(StatClockStoreFull);
dst->Unshare(c);
// Note: dst can be larger than this ThreadClock.
// This is fine since clk_ beyond size is all zeros.
}
void ThreadClock::acq_rel(ClockCache *c, SyncClock *dst) {
- CPP_STAT_INC(StatClockAcquireRelease);
acquire(c, dst);
ReleaseStore(c, dst);
}
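
Release and release-store, sketched under the same assumptions: release joins the thread clock into the destination, while release-store overwrites it; since an acquire leaves the thread clock covering the old destination, acq_rel above composes acquire with ReleaseStore:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // release: dst keeps its history and additionally covers this thread's.
    void SketchRelease(std::vector<uint64_t> &dst,
                       const std::vector<uint64_t> &thr) {
      if (thr.size() > dst.size())
        dst.resize(thr.size(), 0);
      for (size_t i = 0; i < thr.size(); i++)
        dst[i] = std::max(dst[i], thr[i]);
    }

    // release-store: dst is reset to exactly this thread's history. After an
    // acquire, the thread clock already covers the old dst, so acquire
    // followed by ReleaseStore behaves as acquire-release.
    void SketchReleaseStore(std::vector<uint64_t> &dst,
                            const std::vector<uint64_t> &thr) {
      dst = thr;
    }
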
SyncClock::Dirty *dirty = &dst->dirty_[i];
const unsigned tid = dirty->tid();
if (tid == tid_ || tid == kInvalidTid) {
- CPP_STAT_INC(StatClockReleaseFast);
dirty->set_tid(tid_);
dirty->epoch = clk_[tid_];
return;
// Reset all 'acquired' flags, O(N).
// We are going to touch dst elements, so we need to unshare it.
dst->Unshare(c);
- CPP_STAT_INC(StatClockReleaseSlow);
dst->elem(tid_).epoch = clk_[tid_];
for (uptr i = 0; i < dst->size_; i++)
dst->elem(i).reused = 0;
}
void SyncClock::Resize(ClockCache *c, uptr nclk) {
- CPP_STAT_INC(StatClockReleaseResize);
Unshare(c);
if (nclk <= capacity()) {
// Memory is already allocated, just increase the size.
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_libc.h"
-#include "tsan_stat.h"
#include "ubsan/ubsan_platform.h"
// Setup defaults for compile definitions.
# define TSAN_NO_HISTORY 0
#endif
-#ifndef TSAN_COLLECT_STATS
-# define TSAN_COLLECT_STATS 0
-#endif
-
#ifndef TSAN_CONTAINS_UBSAN
# if CAN_SANITIZE_UB && !SANITIZER_GO
# define TSAN_CONTAINS_UBSAN 1
void build_consistency_release();
#endif
-#if TSAN_COLLECT_STATS
-void build_consistency_stats();
-#else
-void build_consistency_nostats();
-#endif
-
static inline void USED build_consistency() {
#if SANITIZER_DEBUG
build_consistency_debug();
#else
build_consistency_release();
#endif
-#if TSAN_COLLECT_STATS
- build_consistency_stats();
-#else
- build_consistency_nostats();
-#endif
}
template<typename T>
return ret; \
ThreadState *thr = cur_thread(); \
const uptr caller_pc = (uptr)__builtin_return_address(0); \
- StatInc(thr, StatAnnotation); \
- StatInc(thr, Stat##typ); \
ScopedAnnotation sa(thr, __func__, caller_pc); \
const uptr pc = StackTrace::GetCurrentPc(); \
(void)pc; \
ExpectRace expect;
ExpectRace benign;
- DynamicAnnContext()
- : mtx(MutexTypeAnnotations, StatMtxAnnotations) {
- }
+ DynamicAnnContext() : mtx(MutexTypeAnnotations) {}
};
static DynamicAnnContext *dyn_ann_ctx;
const uptr callpc = (uptr)__builtin_return_address(0); \
uptr pc = StackTrace::GetCurrentPc(); \
mo = convert_morder(mo); \
- AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
ScopedAtomic sa(thr, callpc, a, mo, __func__); \
return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
ThreadState *thr_;
};
-static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
- StatInc(thr, StatAtomic);
- StatInc(thr, t);
- StatInc(thr, size == 1 ? StatAtomic1
- : size == 2 ? StatAtomic2
- : size == 4 ? StatAtomic4
- : size == 8 ? StatAtomic8
- : StatAtomic16);
- StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
- : mo == mo_consume ? StatAtomicConsume
- : mo == mo_acquire ? StatAtomicAcquire
- : mo == mo_release ? StatAtomicRelease
- : mo == mo_acq_rel ? StatAtomicAcq_Rel
- : StatAtomicSeq_Cst);
-}
-
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
Mutex mtx;
Processor *proc;
- GlobalProc()
- : mtx(MutexTypeGlobalProc, StatMtxGlobalProc)
- , proc(ProcCreate()) {
- }
+ GlobalProc() : mtx(MutexTypeGlobalProc), proc(ProcCreate()) {}
};
static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
static const int kActiveSpinCnt = 20;
};
-Mutex::Mutex(MutexType type, StatType stat_type) {
+Mutex::Mutex(MutexType type) {
CHECK_GT(type, MutexTypeInvalid);
CHECK_LT(type, MutexTypeCount);
#if SANITIZER_DEBUG
type_ = type;
#endif
-#if TSAN_COLLECT_STATS
- stat_type_ = stat_type;
-#endif
atomic_store(&state_, kUnlocked, memory_order_relaxed);
}
cmp = kUnlocked;
if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
memory_order_acquire)) {
-#if TSAN_COLLECT_STATS && !SANITIZER_GO
- StatInc(cur_thread(), stat_type_, backoff.Contention());
-#endif
return;
}
}
for (Backoff backoff; backoff.Do();) {
prev = atomic_load(&state_, memory_order_acquire);
if ((prev & kWriteLock) == 0) {
-#if TSAN_COLLECT_STATS && !SANITIZER_GO
- StatInc(cur_thread(), stat_type_, backoff.Contention());
-#endif
return;
}
}
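
The Lock/ReadLock hunks above delete contention counters from a CAS spin loop. A minimal sketch of the write-lock path, assuming a bare atomic state word (hypothetical class; TSan's Mutex adds read locks and a tuned Backoff):

    #include <atomic>
    #include <cstdint>
    #include <thread>

    class SketchSpinMutex {
      std::atomic<uint32_t> state_{0};  // 0 = kUnlocked, 1 = kWriteLock

     public:
      void Lock() {
        for (;;) {
          uint32_t cmp = 0;
          // acquire ordering pairs with the release store in Unlock()
          if (state_.compare_exchange_weak(cmp, 1, std::memory_order_acquire))
            return;  // the removed StatInc(..., backoff.Contention()) sat here
          std::this_thread::yield();  // stand-in for Backoff::Do()
        }
      }
      void Unlock() { state_.store(0, std::memory_order_release); }
    };
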
class Mutex {
public:
- explicit Mutex(MutexType type, StatType stat_type);
+ explicit Mutex(MutexType type);
~Mutex();
void Lock();
#if SANITIZER_DEBUG
MutexType type_;
#endif
-#if TSAN_COLLECT_STATS
- StatType stat_type_;
-#endif
Mutex(const Mutex&);
void operator = (const Mutex&);
Context::Context()
: initialized(),
- report_mtx(MutexTypeReport, StatMtxReport),
+ report_mtx(MutexTypeReport),
nreported(),
nmissed_expected(),
thread_registry(new (thread_registry_placeholder) ThreadRegistry(
CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse)),
- racy_mtx(MutexTypeRacy, StatMtxRacy),
+ racy_mtx(MutexTypeRacy),
racy_stacks(),
racy_addresses(),
- fired_suppressions_mtx(MutexTypeFired, StatMtxFired),
+ fired_suppressions_mtx(MutexTypeFired),
clock_alloc(LINKER_INITIALIZED, "clock allocator") {
fired_suppressions.reserve(8);
}
failed = OnFinalize(failed);
-#if TSAN_COLLECT_STATS
- StatAggregate(ctx->stat, thr->stat);
- StatOutput(ctx->stat);
-#endif
-
return failed ? common_flags()->exitcode : 0;
}
void MemoryAccessImpl1(ThreadState *thr, uptr addr,
int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
u64 *shadow_mem, Shadow cur) {
- StatInc(thr, StatMop);
- StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
- StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
// This potentially can live in an MMX/SSE scratch register.
// The required intrinsics are:
return;
// choose a random candidate slot and replace it
StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
- StatInc(thr, StatShadowReplace);
return;
RACE:
HandleRace(thr, shadow_mem, cur, old);
if (!SANITIZER_GO && !kAccessIsWrite && *shadow_mem == kShadowRodata) {
// Access to .rodata section, no races here.
// Measurements show that it can be 10-20% of all memory accesses.
- StatInc(thr, StatMop);
- StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
- StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
- StatInc(thr, StatMopRodata);
return;
}
FastState fast_state = thr->fast_state;
if (UNLIKELY(fast_state.GetIgnoreBit())) {
- StatInc(thr, StatMop);
- StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
- StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
- StatInc(thr, StatMopIgnored);
return;
}
if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
thr->fast_synch_epoch, kAccessIsWrite))) {
- StatInc(thr, StatMop);
- StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
- StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
- StatInc(thr, StatMopSame);
return;
}
u64 *shadow_mem, Shadow cur) {
if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
thr->fast_synch_epoch, kAccessIsWrite))) {
- StatInc(thr, StatMop);
- StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
- StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
- StatInc(thr, StatMopSame);
return;
}
ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
- StatInc(thr, StatFuncEnter);
DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
if (kCollectHistory) {
thr->fast_state.IncrementEpoch();
ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
- StatInc(thr, StatFuncExit);
DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
if (kCollectHistory) {
thr->fast_state.IncrementEpoch();
void build_consistency_release() {}
#endif
-#if TSAN_COLLECT_STATS
-void build_consistency_stats() {}
-#else
-void build_consistency_nostats() {}
-#endif
-
} // namespace __tsan
#if !SANITIZER_GO
Vector<JmpBuf> jmp_bufs;
int ignore_interceptors;
#endif
-#if TSAN_COLLECT_STATS
- u64 stat[StatCnt];
-#endif
const u32 tid;
const int unique_id;
bool in_symbolizer;
Flags flags;
- u64 stat[StatCnt];
u64 int_alloc_cnt[MBlockTypeCount];
u64 int_alloc_siz[MBlockTypeCount];
};
ObtainCurrentStack(thr, pc, &stack); \
stack.ReverseOrder();
-#if TSAN_COLLECT_STATS
-void StatAggregate(u64 *dst, u64 *src);
-void StatOutput(u64 *stat);
-#endif
-
-void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
-#if TSAN_COLLECT_STATS
- thr->stat[typ] += n;
-#endif
-}
-void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
-#if TSAN_COLLECT_STATS
- thr->stat[typ] = n;
-#endif
-}
-
void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size, const char *name);
void DontNeedShadowFor(uptr addr, uptr size);
DCHECK_GE((int)typ, 0);
DCHECK_LE((int)typ, 7);
DCHECK_EQ(GetLsb(addr, kEventPCBits), addr);
- StatInc(thr, StatEvents);
u64 pos = fs.GetTracePos();
if (UNLIKELY((pos % kTracePartSize) == 0)) {
#if !SANITIZER_GO
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
- StatInc(thr, StatMutexCreate);
if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
CHECK(!thr->is_freeing);
thr->is_freeing = true;
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
- StatInc(thr, StatMutexDestroy);
SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
if (s == 0)
return;
const bool first = s->recursion == 0;
s->recursion += rec;
if (first) {
- StatInc(thr, StatMutexLock);
AcquireImpl(thr, pc, &s->clock);
AcquireImpl(thr, pc, &s->read_clock);
} else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
- StatInc(thr, StatMutexRecLock);
}
thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
bool pre_lock = false;
rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
s->recursion -= rec;
if (s->recursion == 0) {
- StatInc(thr, StatMutexUnlock);
s->owner_tid = kInvalidTid;
ReleaseStoreImpl(thr, pc, &s->clock);
} else {
- StatInc(thr, StatMutexRecUnlock);
}
}
thr->mset.Del(s->GetId(), true);
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
- StatInc(thr, StatMutexReadLock);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
- StatInc(thr, StatMutexReadUnlock);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
if (s->owner_tid == kInvalidTid) {
// Seems to be read unlock.
write = false;
- StatInc(thr, StatMutexReadUnlock);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
ReleaseImpl(thr, pc, &s->read_clock);
CHECK_GT(s->recursion, 0);
s->recursion--;
if (s->recursion == 0) {
- StatInc(thr, StatMutexUnlock);
s->owner_tid = kInvalidTid;
ReleaseStoreImpl(thr, pc, &s->clock);
} else {
- StatInc(thr, StatMutexRecUnlock);
}
} else if (!s->IsFlagSet(MutexFlagBroken)) {
s->SetFlags(MutexFlagBroken);
return;
thr->clock.set(thr->fast_state.epoch());
thr->clock.acquire(&thr->proc()->clock_cache, c);
- StatInc(thr, StatSyncAcquire);
}
void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
thr->clock.releaseStoreAcquire(&thr->proc()->clock_cache, c);
- StatInc(thr, StatSyncReleaseStoreAcquire);
}
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
thr->clock.release(&thr->proc()->clock_cache, c);
- StatInc(thr, StatSyncRelease);
}
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
- StatInc(thr, StatSyncRelease);
}
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
thr->clock.acq_rel(&thr->proc()->clock_cache, c);
- StatInc(thr, StatSyncAcquire);
- StatInc(thr, StatSyncRelease);
}
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
ReleaseImpl(args->thr, 0, &sync);
creation_stack_id = CurrentStackId(args->thr, args->pc);
- if (reuse_count == 0)
- StatInc(args->thr, StatThreadMaxTid);
}
void ThreadContext::OnReset() {
thr->fast_synch_epoch = epoch0;
AcquireImpl(thr, 0, &sync);
- StatInc(thr, StatSyncAcquire);
sync.Reset(&thr->proc()->clock_cache);
thr->is_inited = true;
DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
PlatformCleanUpThreadState(thr);
#endif
thr->~ThreadState();
-#if TSAN_COLLECT_STATS
- StatAggregate(ctx->stat, thr->stat);
-#endif
thr = 0;
}
}
int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
- StatInc(thr, StatThreadCreate);
OnCreatedArgs args = { thr, pc };
u32 parent_tid = thr ? thr->tid : kInvalidTid; // No parent for GCD workers.
int tid =
ctx->thread_registry->CreateThread(uid, detached, parent_tid, &args);
DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent_tid, tid, uid);
- StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads());
return tid;
}
void ThreadFinish(ThreadState *thr) {
ThreadCheckIgnore(thr);
- StatInc(thr, StatThreadFinish);
if (thr->stk_addr && thr->stk_size)
DontNeedShadowFor(thr->stk_addr, thr->stk_size);
if (thr->tls_addr && thr->tls_size)
}
#endif
- StatInc(thr, StatMopRange);
-
if (*shadow_mem == kShadowRodata) {
DCHECK(!is_write);
// Access to .rodata section, no races here.
// Measurements show that it can be 10-20% of all memory accesses.
- StatInc(thr, StatMopRangeRodata);
return;
}
--- a/compiler-rt/lib/tsan/rtl/tsan_stat.cpp
+++ /dev/null
-//===-- tsan_stat.cpp -----------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-#include "tsan_stat.h"
-#include "tsan_rtl.h"
-
-namespace __tsan {
-
-#if TSAN_COLLECT_STATS
-
-void StatAggregate(u64 *dst, u64 *src) {
- for (int i = 0; i < StatCnt; i++)
- dst[i] += src[i];
-}
-
-void StatOutput(u64 *stat) {
- stat[StatShadowNonZero] = stat[StatShadowProcessed] - stat[StatShadowZero];
-
- static const char *name[StatCnt] = {};
- name[StatMop] = "Memory accesses ";
- name[StatMopRead] = " Including reads ";
- name[StatMopWrite] = " writes ";
- name[StatMop1] = " Including size 1 ";
- name[StatMop2] = " size 2 ";
- name[StatMop4] = " size 4 ";
- name[StatMop8] = " size 8 ";
- name[StatMopSame] = " Including same ";
- name[StatMopIgnored] = " Including ignored ";
- name[StatMopRange] = " Including range ";
- name[StatMopRodata] = " Including .rodata ";
- name[StatMopRangeRodata] = " Including .rodata range ";
- name[StatShadowProcessed] = "Shadow processed ";
- name[StatShadowZero] = " Including empty ";
- name[StatShadowNonZero] = " Including non empty ";
- name[StatShadowSameSize] = " Including same size ";
- name[StatShadowIntersect] = " intersect ";
- name[StatShadowNotIntersect] = " not intersect ";
- name[StatShadowSameThread] = " Including same thread ";
- name[StatShadowAnotherThread] = " another thread ";
- name[StatShadowReplace] = " Including evicted ";
-
- name[StatFuncEnter] = "Function entries ";
- name[StatFuncExit] = "Function exits ";
- name[StatEvents] = "Events collected ";
-
- name[StatThreadCreate] = "Total threads created ";
- name[StatThreadFinish] = " threads finished ";
- name[StatThreadReuse] = " threads reused ";
- name[StatThreadMaxTid] = " max tid ";
- name[StatThreadMaxAlive] = " max alive threads ";
-
- name[StatMutexCreate] = "Mutexes created ";
- name[StatMutexDestroy] = " destroyed ";
- name[StatMutexLock] = " lock ";
- name[StatMutexUnlock] = " unlock ";
- name[StatMutexRecLock] = " recursive lock ";
- name[StatMutexRecUnlock] = " recursive unlock ";
- name[StatMutexReadLock] = " read lock ";
- name[StatMutexReadUnlock] = " read unlock ";
-
- name[StatSyncCreated] = "Sync objects created ";
- name[StatSyncDestroyed] = " destroyed ";
- name[StatSyncAcquire] = " acquired ";
- name[StatSyncRelease] = " released ";
-
- name[StatClockAcquire] = "Clock acquire ";
- name[StatClockAcquireEmpty] = " empty clock ";
- name[StatClockAcquireFastRelease] = " fast from release-store ";
- name[StatClockAcquireFull] = " full (slow) ";
- name[StatClockAcquiredSomething] = " acquired something ";
- name[StatClockRelease] = "Clock release ";
- name[StatClockReleaseResize] = " resize ";
- name[StatClockReleaseFast] = " fast ";
- name[StatClockReleaseSlow] = " dirty overflow (slow) ";
- name[StatClockReleaseFull] = " full (slow) ";
- name[StatClockReleaseAcquired] = " was acquired ";
- name[StatClockReleaseClearTail] = " clear tail ";
- name[StatClockStore] = "Clock release store ";
- name[StatClockStoreResize] = " resize ";
- name[StatClockStoreFast] = " fast ";
- name[StatClockStoreFull] = " slow ";
- name[StatClockStoreTail] = " clear tail ";
- name[StatClockAcquireRelease] = "Clock acquire-release ";
-
- name[StatAtomic] = "Atomic operations ";
- name[StatAtomicLoad] = " Including load ";
- name[StatAtomicStore] = " store ";
- name[StatAtomicExchange] = " exchange ";
- name[StatAtomicFetchAdd] = " fetch_add ";
- name[StatAtomicFetchSub] = " fetch_sub ";
- name[StatAtomicFetchAnd] = " fetch_and ";
- name[StatAtomicFetchOr] = " fetch_or ";
- name[StatAtomicFetchXor] = " fetch_xor ";
- name[StatAtomicFetchNand] = " fetch_nand ";
- name[StatAtomicCAS] = " compare_exchange ";
- name[StatAtomicFence] = " fence ";
- name[StatAtomicRelaxed] = " Including relaxed ";
- name[StatAtomicConsume] = " consume ";
- name[StatAtomicAcquire] = " acquire ";
- name[StatAtomicRelease] = " release ";
- name[StatAtomicAcq_Rel] = " acq_rel ";
- name[StatAtomicSeq_Cst] = " seq_cst ";
- name[StatAtomic1] = " Including size 1 ";
- name[StatAtomic2] = " size 2 ";
- name[StatAtomic4] = " size 4 ";
- name[StatAtomic8] = " size 8 ";
- name[StatAtomic16] = " size 16 ";
-
- name[StatAnnotation] = "Dynamic annotations ";
- name[StatAnnotateHappensBefore] = " HappensBefore ";
- name[StatAnnotateHappensAfter] = " HappensAfter ";
- name[StatAnnotateCondVarSignal] = " CondVarSignal ";
- name[StatAnnotateCondVarSignalAll] = " CondVarSignalAll ";
- name[StatAnnotateMutexIsNotPHB] = " MutexIsNotPHB ";
- name[StatAnnotateCondVarWait] = " CondVarWait ";
- name[StatAnnotateRWLockCreate] = " RWLockCreate ";
- name[StatAnnotateRWLockCreateStatic] = " StatAnnotateRWLockCreateStatic ";
- name[StatAnnotateRWLockDestroy] = " RWLockDestroy ";
- name[StatAnnotateRWLockAcquired] = " RWLockAcquired ";
- name[StatAnnotateRWLockReleased] = " RWLockReleased ";
- name[StatAnnotateTraceMemory] = " TraceMemory ";
- name[StatAnnotateFlushState] = " FlushState ";
- name[StatAnnotateNewMemory] = " NewMemory ";
- name[StatAnnotateNoOp] = " NoOp ";
- name[StatAnnotateFlushExpectedRaces] = " FlushExpectedRaces ";
- name[StatAnnotateEnableRaceDetection] = " EnableRaceDetection ";
- name[StatAnnotateMutexIsUsedAsCondVar] = " MutexIsUsedAsCondVar ";
- name[StatAnnotatePCQGet] = " PCQGet ";
- name[StatAnnotatePCQPut] = " PCQPut ";
- name[StatAnnotatePCQDestroy] = " PCQDestroy ";
- name[StatAnnotatePCQCreate] = " PCQCreate ";
- name[StatAnnotateExpectRace] = " ExpectRace ";
- name[StatAnnotateBenignRaceSized] = " BenignRaceSized ";
- name[StatAnnotateBenignRace] = " BenignRace ";
- name[StatAnnotateIgnoreReadsBegin] = " IgnoreReadsBegin ";
- name[StatAnnotateIgnoreReadsEnd] = " IgnoreReadsEnd ";
- name[StatAnnotateIgnoreWritesBegin] = " IgnoreWritesBegin ";
- name[StatAnnotateIgnoreWritesEnd] = " IgnoreWritesEnd ";
- name[StatAnnotateIgnoreSyncBegin] = " IgnoreSyncBegin ";
- name[StatAnnotateIgnoreSyncEnd] = " IgnoreSyncEnd ";
- name[StatAnnotatePublishMemoryRange] = " PublishMemoryRange ";
- name[StatAnnotateUnpublishMemoryRange] = " UnpublishMemoryRange ";
- name[StatAnnotateThreadName] = " ThreadName ";
- name[Stat__tsan_mutex_create] = " __tsan_mutex_create ";
- name[Stat__tsan_mutex_destroy] = " __tsan_mutex_destroy ";
- name[Stat__tsan_mutex_pre_lock] = " __tsan_mutex_pre_lock ";
- name[Stat__tsan_mutex_post_lock] = " __tsan_mutex_post_lock ";
- name[Stat__tsan_mutex_pre_unlock] = " __tsan_mutex_pre_unlock ";
- name[Stat__tsan_mutex_post_unlock] = " __tsan_mutex_post_unlock ";
- name[Stat__tsan_mutex_pre_signal] = " __tsan_mutex_pre_signal ";
- name[Stat__tsan_mutex_post_signal] = " __tsan_mutex_post_signal ";
- name[Stat__tsan_mutex_pre_divert] = " __tsan_mutex_pre_divert ";
- name[Stat__tsan_mutex_post_divert] = " __tsan_mutex_post_divert ";
-
- name[StatMtxTotal] = "Contentionz ";
- name[StatMtxTrace] = " Trace ";
- name[StatMtxThreads] = " Threads ";
- name[StatMtxReport] = " Report ";
- name[StatMtxSyncVar] = " SyncVar ";
- name[StatMtxSyncTab] = " SyncTab ";
- name[StatMtxSlab] = " Slab ";
- name[StatMtxAtExit] = " Atexit ";
- name[StatMtxAnnotations] = " Annotations ";
- name[StatMtxMBlock] = " MBlock ";
- name[StatMtxDeadlockDetector] = " DeadlockDetector ";
- name[StatMtxFired] = " FiredSuppressions ";
- name[StatMtxRacy] = " RacyStacks ";
- name[StatMtxFD] = " FD ";
- name[StatMtxGlobalProc] = " GlobalProc ";
-
- Printf("Statistics:\n");
- for (int i = 0; i < StatCnt; i++)
- Printf("%s: %16zu\n", name[i], (uptr)stat[i]);
-}
-
-#endif
-
-} // namespace __tsan
--- a/compiler-rt/lib/tsan/rtl/tsan_stat.h
+++ /dev/null
-//===-- tsan_stat.h ---------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef TSAN_STAT_H
-#define TSAN_STAT_H
-
-namespace __tsan {
-
-enum StatType {
- // Memory access processing related stuff.
- StatMop,
- StatMopRead,
- StatMopWrite,
- StatMop1, // These must be consecutive.
- StatMop2,
- StatMop4,
- StatMop8,
- StatMopSame,
- StatMopIgnored,
- StatMopRange,
- StatMopRodata,
- StatMopRangeRodata,
- StatShadowProcessed,
- StatShadowZero,
- StatShadowNonZero, // Derived.
- StatShadowSameSize,
- StatShadowIntersect,
- StatShadowNotIntersect,
- StatShadowSameThread,
- StatShadowAnotherThread,
- StatShadowReplace,
-
- // Func processing.
- StatFuncEnter,
- StatFuncExit,
-
- // Trace processing.
- StatEvents,
-
- // Threads.
- StatThreadCreate,
- StatThreadFinish,
- StatThreadReuse,
- StatThreadMaxTid,
- StatThreadMaxAlive,
-
- // Mutexes.
- StatMutexCreate,
- StatMutexDestroy,
- StatMutexLock,
- StatMutexUnlock,
- StatMutexRecLock,
- StatMutexRecUnlock,
- StatMutexReadLock,
- StatMutexReadUnlock,
-
- // Synchronization.
- StatSyncCreated,
- StatSyncDestroyed,
- StatSyncAcquire,
- StatSyncRelease,
- StatSyncReleaseStoreAcquire,
-
- // Clocks - acquire.
- StatClockAcquire,
- StatClockAcquireEmpty,
- StatClockAcquireFastRelease,
- StatClockAcquireFull,
- StatClockAcquiredSomething,
- // Clocks - release.
- StatClockRelease,
- StatClockReleaseResize,
- StatClockReleaseFast,
- StatClockReleaseSlow,
- StatClockReleaseFull,
- StatClockReleaseAcquired,
- StatClockReleaseClearTail,
- // Clocks - release store.
- StatClockStore,
- StatClockStoreResize,
- StatClockStoreFast,
- StatClockStoreFull,
- StatClockStoreTail,
- // Clocks - acquire-release.
- StatClockAcquireRelease,
-
- // Atomics.
- StatAtomic,
- StatAtomicLoad,
- StatAtomicStore,
- StatAtomicExchange,
- StatAtomicFetchAdd,
- StatAtomicFetchSub,
- StatAtomicFetchAnd,
- StatAtomicFetchOr,
- StatAtomicFetchXor,
- StatAtomicFetchNand,
- StatAtomicCAS,
- StatAtomicFence,
- StatAtomicRelaxed,
- StatAtomicConsume,
- StatAtomicAcquire,
- StatAtomicRelease,
- StatAtomicAcq_Rel,
- StatAtomicSeq_Cst,
- StatAtomic1,
- StatAtomic2,
- StatAtomic4,
- StatAtomic8,
- StatAtomic16,
-
- // Dynamic annotations.
- StatAnnotation,
- StatAnnotateHappensBefore,
- StatAnnotateHappensAfter,
- StatAnnotateCondVarSignal,
- StatAnnotateCondVarSignalAll,
- StatAnnotateMutexIsNotPHB,
- StatAnnotateCondVarWait,
- StatAnnotateRWLockCreate,
- StatAnnotateRWLockCreateStatic,
- StatAnnotateRWLockDestroy,
- StatAnnotateRWLockAcquired,
- StatAnnotateRWLockReleased,
- StatAnnotateTraceMemory,
- StatAnnotateFlushState,
- StatAnnotateNewMemory,
- StatAnnotateNoOp,
- StatAnnotateFlushExpectedRaces,
- StatAnnotateEnableRaceDetection,
- StatAnnotateMutexIsUsedAsCondVar,
- StatAnnotatePCQGet,
- StatAnnotatePCQPut,
- StatAnnotatePCQDestroy,
- StatAnnotatePCQCreate,
- StatAnnotateExpectRace,
- StatAnnotateBenignRaceSized,
- StatAnnotateBenignRace,
- StatAnnotateIgnoreReadsBegin,
- StatAnnotateIgnoreReadsEnd,
- StatAnnotateIgnoreWritesBegin,
- StatAnnotateIgnoreWritesEnd,
- StatAnnotateIgnoreSyncBegin,
- StatAnnotateIgnoreSyncEnd,
- StatAnnotatePublishMemoryRange,
- StatAnnotateUnpublishMemoryRange,
- StatAnnotateThreadName,
- Stat__tsan_mutex_create,
- Stat__tsan_mutex_destroy,
- Stat__tsan_mutex_pre_lock,
- Stat__tsan_mutex_post_lock,
- Stat__tsan_mutex_pre_unlock,
- Stat__tsan_mutex_post_unlock,
- Stat__tsan_mutex_pre_signal,
- Stat__tsan_mutex_post_signal,
- Stat__tsan_mutex_pre_divert,
- Stat__tsan_mutex_post_divert,
-
- // Internal mutex contentionz.
- StatMtxTotal,
- StatMtxTrace,
- StatMtxThreads,
- StatMtxReport,
- StatMtxSyncVar,
- StatMtxSyncTab,
- StatMtxSlab,
- StatMtxAnnotations,
- StatMtxAtExit,
- StatMtxMBlock,
- StatMtxDeadlockDetector,
- StatMtxFired,
- StatMtxRacy,
- StatMtxFD,
- StatMtxGlobalProc,
-
- // This must be the last.
- StatCnt
-};
-
-} // namespace __tsan
-
-#endif // TSAN_STAT_H
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);
-SyncVar::SyncVar()
- : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
- Reset(0);
-}
+SyncVar::SyncVar() : mtx(MutexTypeSyncVar) { Reset(0); }
void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
this->addr = addr;
// CreateThreadContext.
TraceHeader headers[kTraceParts];
- Trace()
- : mtx(MutexTypeTrace, StatMtxTrace) {
- }
+ Trace() : mtx(MutexTypeTrace) {}
};
} // namespace __tsan
// produce slightly less efficient code.
//===----------------------------------------------------------------------===//
do {
- StatInc(thr, StatShadowProcessed);
const unsigned kAccessSize = 1 << kAccessSizeLog;
u64 *sp = &shadow_mem[idx];
old = LoadShadow(sp);
if (LIKELY(old.IsZero())) {
- StatInc(thr, StatShadowZero);
if (!stored) {
StoreIfNotYetStored(sp, &store_word);
stored = true;
}
// is the memory access equal to the previous?
if (LIKELY(Shadow::Addr0AndSizeAreEqual(cur, old))) {
- StatInc(thr, StatShadowSameSize);
// same thread?
if (LIKELY(Shadow::TidsAreEqual(old, cur))) {
- StatInc(thr, StatShadowSameThread);
if (LIKELY(old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic))) {
StoreIfNotYetStored(sp, &store_word);
stored = true;
}
break;
}
- StatInc(thr, StatShadowAnotherThread);
if (HappensBefore(old, thr)) {
if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic)) {
StoreIfNotYetStored(sp, &store_word);
}
// Do the memory access intersect?
if (Shadow::TwoRangesIntersect(old, cur, kAccessSize)) {
- StatInc(thr, StatShadowIntersect);
- if (Shadow::TidsAreEqual(old, cur)) {
- StatInc(thr, StatShadowSameThread);
+ if (Shadow::TidsAreEqual(old, cur))
break;
- }
- StatInc(thr, StatShadowAnotherThread);
if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic))
break;
if (LIKELY(HappensBefore(old, thr)))
  break;
goto RACE;
}
// The accesses do not intersect.
- StatInc(thr, StatShadowNotIntersect);
break;
} while (0);
}
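
Taken together, the loop above encodes the race predicate. A hedged restatement with an illustrative struct (the real check operates on packed shadow words): two overlapping accesses race when they come from different threads, are not both reads and not both atomics, and the old one does not happen-before the current one:

    #include <cstdint>
    #include <vector>

    struct SketchAccess {
      unsigned tid;    // thread that performed the access
      uint64_t epoch;  // that thread's logical time at the access
      bool is_write;
      bool is_atomic;
    };

    // thr_clock[t] = current thread's vector-clock entry for thread t.
    // Precondition: the two accesses touch overlapping bytes.
    bool SketchRacesWith(const SketchAccess &old_a, const SketchAccess &cur,
                         const std::vector<uint64_t> &thr_clock) {
      if (old_a.tid == cur.tid)
        return false;  // same thread: ordered by program order
      if (!old_a.is_write && !cur.is_write)
        return false;  // IsBothReadsOrAtomic: two reads never race
      if (old_a.is_atomic && cur.is_atomic)
        return false;  // ... and neither do two atomics
      if (old_a.tid < thr_clock.size() && old_a.epoch <= thr_clock[old_a.tid])
        return false;  // HappensBefore: synchronized via acquire/release
      return true;     // conflicting and concurrent -> report a race
    }
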
TEST(Mutex, Write) {
- Mutex mtx(MutexTypeAnnotations, StatMtxAnnotations);
+ Mutex mtx(MutexTypeAnnotations);
TestData<Mutex> data(&mtx);
pthread_t threads[kThreads];
for (int i = 0; i < kThreads; i++)
}
TEST(Mutex, ReadWrite) {
- Mutex mtx(MutexTypeAnnotations, StatMtxAnnotations);
+ Mutex mtx(MutexTypeAnnotations);
TestData<Mutex> data(&mtx);
pthread_t threads[kThreads];
for (int i = 0; i < kThreads; i++)