namespace __tsan {
+COMPILER_CHECK(sizeof(MBlock) == 16);
+
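+// MBlock is packed into two words; bit 0 of the first word doubles as a
+// spin lock. Lock() spins while the bit is set (proc_yield for the first
+// 10 iterations, then sched_yield) and acquires it with a CAS.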
+void MBlock::Lock() {
+ atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
+ uptr v = atomic_load(a, memory_order_relaxed);
+ for (int iter = 0;; iter++) {
+ if (v & 1) {
+ if (iter < 10)
+ proc_yield(20);
+ else
+ internal_sched_yield();
+ v = atomic_load(a, memory_order_relaxed);
+ continue;
+ }
+ if (atomic_compare_exchange_weak(a, &v, v | 1, memory_order_acquire))
+ break;
+ }
+}
+
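+// Unlock clears bit 0; the release store makes the list updates done
+// under the lock visible to the next locker.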
+void MBlock::Unlock() {
+ atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
+ uptr v = atomic_load(a, memory_order_relaxed);
+ DCHECK(v & 1);
+ atomic_store(a, v & ~(uptr)1, memory_order_release);
+}
+
struct MapUnmapCallback {
void OnMap(uptr p, uptr size) const { }
void OnUnmap(uptr p, uptr size) const {
if (p == 0)
return 0;
MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
- b->size = sz;
- b->head = 0;
- b->alloc_tid = thr->unique_id;
- b->alloc_stack_id = CurrentStackId(thr, pc);
- if (CTX() && CTX()->initialized) {
+ b->Init(sz, thr->tid, CurrentStackId(thr, pc));
+ if (CTX() && CTX()->initialized)
MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
- }
DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
SignalUnsafeCall(thr, pc);
return p;
CHECK_NE(p, (void*)0);
DPrintf("#%d: free(%p)\n", thr->tid, p);
MBlock *b = (MBlock*)allocator()->GetMetaData(p);
- if (b->head) {
- Lock l(&b->mtx);
- for (SyncVar *s = b->head; s;) {
+ if (b->ListHead()) {
+ MBlock::ScopedLock l(b);
+ for (SyncVar *s = b->ListHead(); s;) {
SyncVar *res = s;
s = s->next;
StatInc(thr, StatSyncDestroyed);
res->mtx.Lock();
res->mtx.Unlock();
DestroyAndFree(res);
}
- b->head = 0;
+ b->ListReset();
}
- if (CTX() && CTX()->initialized && thr->in_rtl == 1) {
- MemoryRangeFreed(thr, pc, (uptr)p, b->size);
- }
- b->~MBlock();
+ if (CTX() && CTX()->initialized && thr->in_rtl == 1)
+ MemoryRangeFreed(thr, pc, (uptr)p, b->Size());
allocator()->Deallocate(&thr->alloc_cache, p);
SignalUnsafeCall(thr, pc);
}
return 0;
if (p) {
MBlock *b = user_mblock(thr, p);
- internal_memcpy(p2, p, min(b->size, sz));
+ internal_memcpy(p2, p, min(b->Size(), sz));
}
}
- if (p) {
+ if (p)
user_free(thr, pc, p);
- }
return p2;
}
if (p == 0)
return 0;
MBlock *b = (MBlock*)allocator()->GetMetaData(p);
- return (b) ? b->size : 0;
+ return b ? b->Size() : 0;
}
MBlock *user_mblock(ThreadState *thr, void *p) {
if (p == 0)
return 0;
MBlock *b = (MBlock*)allocator()->GetMetaData(p);
return b;
}
void __tsan_on_thread_idle() {
// Descriptor of user's memory block.
struct MBlock {
- Mutex mtx;
- uptr size;
- u32 alloc_tid;
- u32 alloc_stack_id;
- SyncVar *head;
+ /*
+ u64 mtx : 1; // must be first
+ u64 lst : 44;
+ u64 stk : 31; // on word boundary
+ u64 tid : kTidBits;
+ u64 siz : 128 - 1 - 31 - 44 - kTidBits; // 39
+ */
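+ // With kTidBits = 13 the fields fill both words exactly:
+ // raw[0] = mtx | lst | low 19 bits of stk,
+ // raw[1] = high 12 bits of stk | tid | siz.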
+ u64 raw[2];
- MBlock()
- : mtx(MutexTypeMBlock, StatMtxMBlock) {
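+ // Packs all fields per the layout above; the DCHECKs verify the round-trip.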
+ void Init(uptr siz, u32 tid, u32 stk) {
+ raw[0] = raw[1] = 0;
+ raw[1] |= (u64)siz << ((1 + 44 + 31 + kTidBits) % 64);
+ raw[1] |= (u64)tid << ((1 + 44 + 31) % 64);
+ raw[0] |= (u64)stk << (1 + 44);
+ raw[1] |= (u64)stk >> (64 - 44 - 1);
+ DCHECK_EQ(Size(), siz);
+ DCHECK_EQ(Tid(), tid);
+ DCHECK_EQ(StackId(), stk);
}
+
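+ // Accessors reverse the shifts in Init(); GetLsb masks to the field width.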
+ u32 Tid() const {
+ return GetLsb(raw[1] >> ((1 + 44 + 31) % 64), kTidBits);
+ }
+
+ uptr Size() const {
+ return raw[1] >> ((1 + 44 + 31 + kTidBits) % 64);
+ }
+
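+ // stk straddles the word boundary: bits 0..18 come from raw[0],
+ // bits 19..30 from raw[1].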
+ u32 StackId() const {
+ return (raw[0] >> (1 + 44)) | GetLsb(raw[1] << (64 - 44 - 1), 31);
+ }
+
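+ // SyncVars are 8-byte aligned, so the head pointer is stored as ptr >> 3
+ // in the 44-bit lst field (enough for a 47-bit address space).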
+ SyncVar *ListHead() const {
+ return (SyncVar*)(GetLsb(raw[0] >> 1, 44) << 3);
+ }
+
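+ // Replaces the stored head with v in place: XOR-ing raw[0] with
+ // ((old ^ new) >> 3) << 1 flips exactly the lst bits.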
+ void ListPush(SyncVar *v) {
+ SyncVar *lst = ListHead();
+ v->next = lst;
+ u64 x = (u64)v ^ (u64)lst;
+ x = (x >> 3) << 1;
+ raw[0] ^= x;
+ DCHECK_EQ(ListHead(), v);
+ }
+
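+ // Unlinks and returns the current head, using the same XOR trick.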
+ SyncVar *ListPop() {
+ SyncVar *lst = ListHead();
+ SyncVar *nxt = lst->next;
+ lst->next = 0;
+ u64 x = (u64)lst ^ (u64)nxt;
+ x = (x >> 3) << 1;
+ raw[0] ^= x;
+ DCHECK_EQ(ListHead(), nxt);
+ return lst;
+ }
+
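+ // Clears the head pointer (XOR with itself zeroes the lst bits).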
+ void ListReset() {
+ SyncVar *lst = ListHead();
+ u64 x = (u64)lst;
+ x = (x >> 3) << 1;
+ raw[0] ^= x;
+ DCHECK_EQ(ListHead(), 0);
+ }
+
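+ // Spin lock on bit 0 of raw[0] (defined out of line).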
+ void Lock();
+ void Unlock();
+ typedef GenericScopedLock<MBlock> ScopedLock;
};
#ifndef TSAN_GO
return 0;
}
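+// Returns the ThreadContext for tid; the thread registry lock must be
+// held (checked by CheckLocked()).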
+static ThreadContext *FindThreadByTidLocked(int tid) {
+ Context *ctx = CTX();
+ ctx->thread_registry->CheckLocked();
+ return static_cast<ThreadContext*>(
+ ctx->thread_registry->GetThreadLocked(tid));
+}
+
static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
uptr addr = (uptr)arg;
ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
}
if (allocator()->PointerIsMine((void*)addr)) {
MBlock *b = user_mblock(0, (void*)addr);
- ThreadContext *tctx = FindThreadByUidLocked(b->alloc_tid);
+ ThreadContext *tctx = FindThreadByTidLocked(b->Tid());
void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
ReportLocation *loc = new(mem) ReportLocation();
rep_->locs.PushBack(loc);
loc->type = ReportLocationHeap;
loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr);
- loc->size = b->size;
- loc->tid = tctx ? tctx->tid : b->alloc_tid;
+ loc->size = b->Size();
+ loc->tid = tctx ? tctx->tid : b->Tid();
loc->name = 0;
loc->file = 0;
loc->line = 0;
loc->stack = 0;
uptr ssz = 0;
- const uptr *stack = StackDepotGet(b->alloc_stack_id, &ssz);
+ const uptr *stack = StackDepotGet(b->StackId(), &ssz);
if (stack) {
StackTrace trace;
trace.Init(stack, ssz);
// the hashmap anyway.
if (PrimaryAllocator::PointerIsMine((void*)addr)) {
MBlock *b = user_mblock(thr, (void*)addr);
- Lock l(&b->mtx);
+ MBlock::ScopedLock l(b);
SyncVar *res = 0;
- for (res = b->head; res; res = res->next) {
+ for (res = b->ListHead(); res; res = res->next) {
if (res->addr == addr)
break;
}
if (!create)
return 0;
res = Create(thr, pc, addr);
- res->next = b->head;
- b->head = res;
+ b->ListPush(res);
}
if (write_lock)
res->mtx.Lock();
MBlock *b = user_mblock(thr, (void*)addr);
SyncVar *res = 0;
{
- Lock l(&b->mtx);
- SyncVar **prev = &b->head;
- res = *prev;
- while (res) {
+ MBlock::ScopedLock l(b);
+ res = b->ListHead();
+ if (res) {
if (res->addr == addr) {
if (res->is_linker_init)
return 0;
- *prev = res->next;
- break;
+ b->ListPop();
+ } else {
+ SyncVar **prev = &res->next;
+ res = *prev;
+ while (res) {
+ if (res->addr == addr) {
+ if (res->is_linker_init)
+ return 0;
+ *prev = res->next;
+ break;
+ }
+ prev = &res->next;
+ res = *prev;
+ }
+ }
+ if (res) {
+ StatInc(thr, StatSyncDestroyed);
+ res->mtx.Lock();
+ res->mtx.Unlock();
}
- prev = &res->next;
- res = *prev;
}
}
- if (res) {
- StatInc(thr, StatSyncDestroyed);
- res->mtx.Lock();
- res->mtx.Unlock();
- }
return res;
}
#endif
EXPECT_NE(p2, p);
MBlock *b = user_mblock(thr, p);
EXPECT_NE(b, (MBlock*)0);
- EXPECT_EQ(b->size, (uptr)10);
+ EXPECT_EQ(b->Size(), (uptr)10);
MBlock *b2 = user_mblock(thr, p2);
EXPECT_NE(b2, (MBlock*)0);
- EXPECT_EQ(b2->size, (uptr)20);
+ EXPECT_EQ(b2->Size(), (uptr)20);
for (int i = 0; i < 10; i++) {
p[i] = 42;
EXPECT_EQ(b, user_mblock(thr, p + i));