From: Dmitry Vyukov
Date: Mon, 18 Mar 2013 19:47:36 +0000 (+0000)
Subject: tsan: smaller memory block headers (32b->16b)
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=4ddd37ba5b48eb20a5102abd193437b1641a8e78;p=platform%2Fupstream%2Fllvm.git

tsan: smaller memory block headers (32b->16b)

llvm-svn: 177312
---

diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cc b/compiler-rt/lib/tsan/rtl/tsan_mman.cc
index 7c7b89c..35cf43d 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_mman.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cc
@@ -29,6 +29,32 @@ extern "C" void WEAK __tsan_free_hook(void *ptr) {
 
 namespace __tsan {
 
+COMPILER_CHECK(sizeof(MBlock) == 16);
+
+void MBlock::Lock() {
+  atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
+  uptr v = atomic_load(a, memory_order_relaxed);
+  for (int iter = 0;; iter++) {
+    if (v & 1) {
+      if (iter < 10)
+        proc_yield(20);
+      else
+        internal_sched_yield();
+      v = atomic_load(a, memory_order_relaxed);
+      continue;
+    }
+    if (atomic_compare_exchange_weak(a, &v, v | 1, memory_order_acquire))
+      break;
+  }
+}
+
+void MBlock::Unlock() {
+  atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
+  uptr v = atomic_load(a, memory_order_relaxed);
+  DCHECK(v & 1);
+  atomic_store(a, v & ~1, memory_order_relaxed);
+}
+
 struct MapUnmapCallback {
   void OnMap(uptr p, uptr size) const { }
   void OnUnmap(uptr p, uptr size) const {
@@ -79,13 +105,9 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
   if (p == 0)
     return 0;
   MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
-  b->size = sz;
-  b->head = 0;
-  b->alloc_tid = thr->unique_id;
-  b->alloc_stack_id = CurrentStackId(thr, pc);
-  if (CTX() && CTX()->initialized) {
+  b->Init(sz, thr->tid, CurrentStackId(thr, pc));
+  if (CTX() && CTX()->initialized)
     MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
-  }
   DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
   SignalUnsafeCall(thr, pc);
   return p;
@@ -96,9 +118,9 @@ void user_free(ThreadState *thr, uptr pc, void *p) {
   CHECK_NE(p, (void*)0);
   DPrintf("#%d: free(%p)\n", thr->tid, p);
   MBlock *b = (MBlock*)allocator()->GetMetaData(p);
-  if (b->head) {
-    Lock l(&b->mtx);
-    for (SyncVar *s = b->head; s;) {
+  if (b->ListHead()) {
+    MBlock::ScopedLock l(b);
+    for (SyncVar *s = b->ListHead(); s;) {
       SyncVar *res = s;
       s = s->next;
       StatInc(thr, StatSyncDestroyed);
@@ -106,12 +128,10 @@ void user_free(ThreadState *thr, uptr pc, void *p) {
       res->mtx.Unlock();
       DestroyAndFree(res);
     }
-    b->head = 0;
+    b->ListReset();
   }
-  if (CTX() && CTX()->initialized && thr->in_rtl == 1) {
-    MemoryRangeFreed(thr, pc, (uptr)p, b->size);
-  }
-  b->~MBlock();
+  if (CTX() && CTX()->initialized && thr->in_rtl == 1)
+    MemoryRangeFreed(thr, pc, (uptr)p, b->Size());
   allocator()->Deallocate(&thr->alloc_cache, p);
   SignalUnsafeCall(thr, pc);
 }
@@ -127,12 +147,11 @@ void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
       return 0;
     if (p) {
       MBlock *b = user_mblock(thr, p);
-      internal_memcpy(p2, p, min(b->size, sz));
+      internal_memcpy(p2, p, min(b->Size(), sz));
     }
   }
-  if (p) {
+  if (p)
     user_free(thr, pc, p);
-  }
   return p2;
 }
 
@@ -141,7 +160,7 @@ uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p) {
   if (p == 0)
     return 0;
   MBlock *b = (MBlock*)allocator()->GetMetaData(p);
-  return (b) ? b->size : 0;
+  return b ? b->Size() : 0;
 }
 
 MBlock *user_mblock(ThreadState *thr, void *p) {
@@ -232,7 +251,7 @@ uptr __tsan_get_allocated_size(void *p) {
   if (p == 0)
     return 0;
   MBlock *b = (MBlock*)allocator()->GetMetaData(p);
-  return b->size;
+  return b->Size();
 }
 
 void __tsan_on_thread_idle() {
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
index 3db1a89..053f24a 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -47,15 +47,73 @@ namespace __tsan {
 
 // Descriptor of user's memory block.
 struct MBlock {
-  Mutex mtx;
-  uptr size;
-  u32 alloc_tid;
-  u32 alloc_stack_id;
-  SyncVar *head;
+  /*
+  u64 mtx : 1;  // must be first
+  u64 lst : 44;
+  u64 stk : 31;  // on word boundary
+  u64 tid : kTidBits;
+  u64 siz : 128 - 1 - 31 - 44 - kTidBits;  // 39
+  */
+  u64 raw[2];
 
-  MBlock()
-    : mtx(MutexTypeMBlock, StatMtxMBlock) {
+  void Init(uptr siz, u32 tid, u32 stk) {
+    raw[0] = raw[1] = 0;
+    raw[1] |= (u64)siz << ((1 + 44 + 31 + kTidBits) % 64);
+    raw[1] |= (u64)tid << ((1 + 44 + 31) % 64);
+    raw[0] |= (u64)stk << (1 + 44);
+    raw[1] |= (u64)stk >> (64 - 44 - 1);
+    DCHECK_EQ(Size(), siz);
+    DCHECK_EQ(Tid(), tid);
+    DCHECK_EQ(StackId(), stk);
   }
+
+  u32 Tid() const {
+    return GetLsb(raw[1] >> ((1 + 44 + 31) % 64), kTidBits);
+  }
+
+  uptr Size() const {
+    return raw[1] >> ((1 + 31 + 44 + kTidBits) % 64);
+  }
+
+  u32 StackId() const {
+    return (raw[0] >> (1 + 44)) | GetLsb(raw[1] << (64 - 44 - 1), 31);
+  }
+
+  SyncVar *ListHead() const {
+    return (SyncVar*)(GetLsb(raw[0] >> 1, 44) << 3);
+  }
+
+  void ListPush(SyncVar *v) {
+    SyncVar *lst = ListHead();
+    v->next = lst;
+    u64 x = (u64)v ^ (u64)lst;
+    x = (x >> 3) << 1;
+    raw[0] ^= x;
+    DCHECK_EQ(ListHead(), v);
+  }
+
+  SyncVar *ListPop() {
+    SyncVar *lst = ListHead();
+    SyncVar *nxt = lst->next;
+    lst->next = 0;
+    u64 x = (u64)lst ^ (u64)nxt;
+    x = (x >> 3) << 1;
+    raw[0] ^= x;
+    DCHECK_EQ(ListHead(), nxt);
+    return lst;
+  }
+
+  void ListReset() {
+    SyncVar *lst = ListHead();
+    u64 x = (u64)lst;
+    x = (x >> 3) << 1;
+    raw[0] ^= x;
+    DCHECK_EQ(ListHead(), 0);
+  }
+
+  void Lock();
+  void Unlock();
+  typedef GenericScopedLock<MBlock> ScopedLock;
 };
 
 #ifndef TSAN_GO
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cc b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cc
index 5967f77..2df4234 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cc
@@ -216,6 +216,13 @@ static ThreadContext *FindThreadByUidLocked(int unique_id) {
   return 0;
 }
 
+static ThreadContext *FindThreadByTidLocked(int tid) {
+  Context *ctx = CTX();
+  ctx->thread_registry->CheckLocked();
+  return static_cast<ThreadContext*>(
+      ctx->thread_registry->GetThreadLocked(tid));
+}
+
 static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
   uptr addr = (uptr)arg;
   ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
@@ -306,20 +313,20 @@ void ScopedReport::AddLocation(uptr addr, uptr size) {
   }
   if (allocator()->PointerIsMine((void*)addr)) {
     MBlock *b = user_mblock(0, (void*)addr);
-    ThreadContext *tctx = FindThreadByUidLocked(b->alloc_tid);
+    ThreadContext *tctx = FindThreadByTidLocked(b->Tid());
     void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
     ReportLocation *loc = new(mem) ReportLocation();
     rep_->locs.PushBack(loc);
     loc->type = ReportLocationHeap;
     loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr);
-    loc->size = b->size;
-    loc->tid = tctx ? tctx->tid : b->alloc_tid;
+    loc->size = b->Size();
+    loc->tid = tctx ? tctx->tid : b->Tid();
     loc->name = 0;
     loc->file = 0;
     loc->line = 0;
     loc->stack = 0;
     uptr ssz = 0;
-    const uptr *stack = StackDepotGet(b->alloc_stack_id, &ssz);
+    const uptr *stack = StackDepotGet(b->StackId(), &ssz);
     if (stack) {
       StackTrace trace;
       trace.Init(stack, ssz);
diff --git a/compiler-rt/lib/tsan/rtl/tsan_sync.cc b/compiler-rt/lib/tsan/rtl/tsan_sync.cc
index dc22bc3..94bad21 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_sync.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_sync.cc
@@ -82,9 +82,9 @@ SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
   // the hashmap anyway.
   if (PrimaryAllocator::PointerIsMine((void*)addr)) {
     MBlock *b = user_mblock(thr, (void*)addr);
-    Lock l(&b->mtx);
+    MBlock::ScopedLock l(b);
     SyncVar *res = 0;
-    for (res = b->head; res; res = res->next) {
+    for (res = b->ListHead(); res; res = res->next) {
       if (res->addr == addr)
         break;
     }
@@ -92,8 +92,7 @@ SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
       if (!create)
         return 0;
       res = Create(thr, pc, addr);
-      res->next = b->head;
-      b->head = res;
+      b->ListPush(res);
     }
     if (write_lock)
       res->mtx.Lock();
@@ -149,25 +148,34 @@ SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
     MBlock *b = user_mblock(thr, (void*)addr);
     SyncVar *res = 0;
     {
-      Lock l(&b->mtx);
-      SyncVar **prev = &b->head;
-      res = *prev;
-      while (res) {
+      MBlock::ScopedLock l(b);
+      res = b->ListHead();
+      if (res) {
         if (res->addr == addr) {
           if (res->is_linker_init)
             return 0;
-          *prev = res->next;
-          break;
+          b->ListPop();
+        } else {
+          SyncVar **prev = &res->next;
+          res = *prev;
+          while (res) {
+            if (res->addr == addr) {
+              if (res->is_linker_init)
+                return 0;
+              *prev = res->next;
+              break;
+            }
+            prev = &res->next;
+            res = *prev;
+          }
+        }
+        if (res) {
+          StatInc(thr, StatSyncDestroyed);
+          res->mtx.Lock();
+          res->mtx.Unlock();
         }
-        prev = &res->next;
-        res = *prev;
       }
     }
-    if (res) {
-      StatInc(thr, StatSyncDestroyed);
-      res->mtx.Lock();
-      res->mtx.Unlock();
-    }
     return res;
   }
 #endif
diff --git a/compiler-rt/lib/tsan/tests/unit/tsan_mman_test.cc b/compiler-rt/lib/tsan/tests/unit/tsan_mman_test.cc
index ecbe874..0961d2b 100644
--- a/compiler-rt/lib/tsan/tests/unit/tsan_mman_test.cc
+++ b/compiler-rt/lib/tsan/tests/unit/tsan_mman_test.cc
@@ -55,10 +55,10 @@ TEST(Mman, User) {
   EXPECT_NE(p2, p);
   MBlock *b = user_mblock(thr, p);
   EXPECT_NE(b, (MBlock*)0);
-  EXPECT_EQ(b->size, (uptr)10);
+  EXPECT_EQ(b->Size(), (uptr)10);
   MBlock *b2 = user_mblock(thr, p2);
   EXPECT_NE(b2, (MBlock*)0);
-  EXPECT_EQ(b2->size, (uptr)20);
+  EXPECT_EQ(b2->Size(), (uptr)20);
   for (int i = 0; i < 10; i++) {
     p[i] = 42;
     EXPECT_EQ(b, user_mblock(thr, p + i));
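
Note (not part of the patch): the new MBlock packs the lock bit, the SyncVar list head, the allocation stack id, the tid and the block size into two u64 words; the "// 39" in the struct comment implies kTidBits == 13. The standalone sketch below illustrates the same bit-packing idea for the size/tid/stack-id fields only. The names (PackedBlock, Lsb) and constants are assumptions made for this example, not tsan code.

#include <cassert>
#include <cstdint>
#include <cstdio>

namespace {

const int kTidBits = 13;  // assumed; implied by the "// 39" size-field comment
const int kLstBits = 44;  // SyncVar list head, stored as (pointer >> 3)
const int kStkBits = 31;  // allocation stack id
const int kSizBits = 128 - 1 - kLstBits - kStkBits - kTidBits;  // 39

// Low `bits` bits of v (same role as tsan's GetLsb).
inline uint64_t Lsb(uint64_t v, int bits) {
  return v & ((uint64_t(1) << bits) - 1);
}

// Hypothetical re-implementation of the size/tid/stack-id packing.
struct PackedBlock {
  // raw[0]: bit 0 = lock, bits 1..44 = list head >> 3,
  //         bits 45..63 = low 19 bits of the stack id.
  // raw[1]: bits 0..11 = high 12 bits of the stack id,
  //         bits 12..24 = tid, bits 25..63 = size.
  uint64_t raw[2];

  void Init(uint64_t siz, uint32_t tid, uint32_t stk) {
    assert(siz < (uint64_t(1) << kSizBits));
    raw[0] = raw[1] = 0;
    raw[1] |= siz << ((1 + kLstBits + kStkBits + kTidBits) % 64);
    raw[1] |= uint64_t(tid) << ((1 + kLstBits + kStkBits) % 64);
    raw[0] |= uint64_t(stk) << (1 + kLstBits);
    raw[1] |= uint64_t(stk) >> (64 - kLstBits - 1);
  }

  uint64_t Size() const {
    return raw[1] >> ((1 + kLstBits + kStkBits + kTidBits) % 64);
  }

  uint32_t Tid() const {
    return (uint32_t)Lsb(raw[1] >> ((1 + kLstBits + kStkBits) % 64), kTidBits);
  }

  uint32_t StackId() const {
    return (uint32_t)((raw[0] >> (1 + kLstBits)) |
                      Lsb(raw[1] << (64 - kLstBits - 1), kStkBits));
  }
};

}  // namespace

int main() {
  PackedBlock b;
  b.Init(/*siz=*/4096, /*tid=*/7, /*stk=*/0x12345678);
  assert(b.Size() == 4096);
  assert(b.Tid() == 7);
  assert(b.StackId() == 0x12345678);
  printf("size=%llu tid=%u stk=%#x\n",
         (unsigned long long)b.Size(), b.Tid(), b.StackId());
  return 0;
}

The list head fits in 44 bits for a related reason: the SyncVar objects are at least 8-byte aligned, so the patch stores the pointer shifted right by 3 and restores it with a shift left by 3 (see ListHead/ListPush above).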