From: Dmitry Vyukov
Date: Tue, 21 Sep 2021 08:49:32 +0000 (+0200)
Subject: tsan: include MBlock/SyncObj stats into mem profile
X-Git-Tag: upstream/15.0.7~30881
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=eefef56ece7e27c8746cd207e8e2d96996ea5de1;p=platform%2Fupstream%2Fllvm.git

tsan: include MBlock/SyncObj stats into mem profile

Include info about MBlock/SyncObj memory consumption
in the memory profile.

Depends on D110148.

Reviewed By: melver, vitalybuka

Differential Revision: https://reviews.llvm.org/D110149
---

diff --git a/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h b/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h
index 68ded43..9e15f74 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h
@@ -49,11 +49,7 @@ class DenseSlabAlloc {
   static_assert(sizeof(T) > sizeof(IndexT),
                 "it doesn't make sense to use dense alloc");
 
-  explicit DenseSlabAlloc(LinkerInitialized, const char *name) {
-    freelist_ = 0;
-    fillpos_ = 0;
-    name_ = name;
-  }
+  DenseSlabAlloc(LinkerInitialized, const char *name) : name_(name) {}
 
   explicit DenseSlabAlloc(const char *name)
       : DenseSlabAlloc(LINKER_INITIALIZED, name) {
@@ -89,6 +85,8 @@ class DenseSlabAlloc {
   }
 
   void FlushCache(Cache *c) {
+    if (!c->pos)
+      return;
     SpinMutexLock lock(&mtx_);
     while (c->pos) {
       IndexT idx = c->cache[--c->pos];
@@ -102,33 +100,39 @@ class DenseSlabAlloc {
     internal_memset(c->cache, 0, sizeof(c->cache));
   }
 
+  uptr AllocatedMemory() const {
+    return atomic_load_relaxed(&fillpos_) * kL2Size * sizeof(T);
+  }
+
  private:
   T *map_[kL1Size];
   SpinMutex mtx_;
-  IndexT freelist_;
-  uptr fillpos_;
-  const char *name_;
+  IndexT freelist_ = {0};
+  atomic_uintptr_t fillpos_ = {0};
+  const char *const name_;
 
   void Refill(Cache *c) {
     SpinMutexLock lock(&mtx_);
     if (freelist_ == 0) {
-      if (fillpos_ == kL1Size) {
+      uptr fillpos = atomic_load_relaxed(&fillpos_);
+      if (fillpos == kL1Size) {
         Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n",
             name_, kL1Size, kL2Size);
         Die();
       }
-      VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n",
-          name_, fillpos_, kL1Size, kL2Size);
+      VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n", name_,
+              fillpos, kL1Size, kL2Size);
       T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), name_);
       // Reserve 0 as invalid index.
-      IndexT start = fillpos_ == 0 ? 1 : 0;
+      IndexT start = fillpos == 0 ? 1 : 0;
       for (IndexT i = start; i < kL2Size; i++) {
         new(batch + i) T;
-        *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size;
+        *(IndexT *)(batch + i) = i + 1 + fillpos * kL2Size;
       }
       *(IndexT*)(batch + kL2Size - 1) = 0;
-      freelist_ = fillpos_ * kL2Size + start;
-      map_[fillpos_++] = batch;
+      freelist_ = fillpos * kL2Size + start;
+      map_[fillpos] = batch;
+      atomic_store_relaxed(&fillpos_, fillpos + 1);
     }
     for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
       IndexT idx = freelist_;
diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp b/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp
index 087ee88..4efc211 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp
@@ -121,17 +121,24 @@ void FillProfileCallback(uptr p, uptr rss, bool file,
 
 void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
   uptr mem[MemCount];
-  internal_memset(mem, 0, sizeof(mem[0]) * MemCount);
+  internal_memset(mem, 0, sizeof(mem));
   __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
+  auto meta = ctx->metamap.GetMemoryStats();
   StackDepotStats *stacks = StackDepotGetStats();
+  // All these are allocated from the common mmap region.
+  mem[MemMmap] -= meta.mem_block + meta.sync_obj + stacks->allocated;
+  if (s64(mem[MemMmap]) < 0)
+    mem[MemMmap] = 0;
   internal_snprintf(buf, buf_size,
-      "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
-      " trace:%zd heap:%zd other:%zd stacks=%zd[%zd] nthr=%zd/%zd\n",
-      mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
-      mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20,
-      mem[MemHeap] >> 20, mem[MemOther] >> 20,
-      stacks->allocated >> 20, stacks->n_uniq_ids,
-      nlive, nthread);
+                    "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
+                    " trace:%zd heap:%zd other:%zd memblocks:%zd syncobj:%zu"
+                    " stacks=%zd[%zd] nthr=%zd/%zd\n",
+                    mem[MemTotal] >> 20, mem[MemShadow] >> 20,
+                    mem[MemMeta] >> 20, mem[MemFile] >> 20, mem[MemMmap] >> 20,
+                    mem[MemTrace] >> 20, mem[MemHeap] >> 20,
+                    mem[MemOther] >> 20, meta.mem_block >> 20,
+                    meta.sync_obj >> 20, stacks->allocated >> 20,
+                    stacks->n_uniq_ids, nlive, nthread);
 }
 
 #if SANITIZER_LINUX
diff --git a/compiler-rt/lib/tsan/rtl/tsan_sync.cpp b/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
index 8d20f55..f042aba 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
@@ -269,4 +269,11 @@ void MetaMap::OnProcIdle(Processor *proc) {
   sync_alloc_.FlushCache(&proc->sync_cache);
 }
 
+MetaMap::MemoryStats MetaMap::GetMemoryStats() const {
+  MemoryStats stats;
+  stats.mem_block = block_alloc_.AllocatedMemory();
+  stats.sync_obj = sync_alloc_.AllocatedMemory();
+  return stats;
+}
+
 }  // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl/tsan_sync.h b/compiler-rt/lib/tsan/rtl/tsan_sync.h
index 3d8d57f..fc8fa28 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_sync.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_sync.h
@@ -127,6 +127,13 @@ class MetaMap {
 
   void OnProcIdle(Processor *proc);
 
+  struct MemoryStats {
+    uptr mem_block;
+    uptr sync_obj;
+  };
+
+  MemoryStats GetMemoryStats() const;
+
  private:
   static const u32 kFlagMask = 3u << 30;
   static const u32 kFlagBlock = 1u << 30;
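
Editor's note (not part of the commit): the patch turns DenseSlabAlloc::fillpos_ into an atomic so the new
AllocatedMemory() accessor can report slab consumption with a relaxed load and no lock, while growth still
happens under mtx_; WriteMemoryProfile() then subtracts the MBlock/SyncObj bytes (and stack depot bytes) from
the generic mmap bucket and prints them as the new memblocks:/syncobj: fields. Below is a minimal standalone
C++ sketch of that lock-free-read pattern. It is illustrative only: ToySlabAlloc, kObjSize, and Grow() are
made-up names, not TSan code.

    // Toy model of the pattern: the grow path runs under a mutex, but the
    // fill position is an atomic so a stats query can read it without locking.
    #include <atomic>
    #include <cstddef>
    #include <cstdio>
    #include <mutex>

    class ToySlabAlloc {
     public:
      static constexpr std::size_t kL2Size = 1024;  // objects per slab
      static constexpr std::size_t kObjSize = 64;   // bytes per object

      void Grow() {
        std::lock_guard<std::mutex> lock(mtx_);
        std::size_t fillpos = fillpos_.load(std::memory_order_relaxed);
        // ... a real allocator would map a slab of kL2Size objects here ...
        fillpos_.store(fillpos + 1, std::memory_order_relaxed);
      }

      // Lock-free and approximate: a concurrent Grow() may not be counted yet.
      std::size_t AllocatedMemory() const {
        return fillpos_.load(std::memory_order_relaxed) * kL2Size * kObjSize;
      }

     private:
      std::mutex mtx_;
      std::atomic<std::size_t> fillpos_{0};
    };

    int main() {
      ToySlabAlloc alloc;
      alloc.Grow();
      std::printf("allocated: %zu bytes\n", alloc.AllocatedMemory());
      return 0;
    }

With the same accounting as in the patch, the profile line gains two buckets (memblocks: and syncobj:, in MB)
that are also deducted from mmap: so the memory is not counted twice.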