From bb50e97103360983c18639e258203aa1fdab90ff Mon Sep 17 00:00:00 2001
From: Vitaly Buka <vitalybuka@google.com>
Date: Mon, 15 Nov 2021 15:18:53 -0800
Subject: [PATCH] [NFC][sanitizer] Change StackStore API to use StackTrace

---
 .../lib/sanitizer_common/sanitizer_stack_store.cpp | 19 +++++++++++++++
 .../lib/sanitizer_common/sanitizer_stack_store.h   |  7 +++++-
 .../lib/sanitizer_common/sanitizer_stackdepot.cpp  | 28 +++++++++-------------
 3 files changed, 36 insertions(+), 18 deletions(-)

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
index 33ce0c3..31a4573 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
@@ -14,8 +14,27 @@
 
 #include "sanitizer_atomic.h"
 #include "sanitizer_common.h"
+#include "sanitizer_stacktrace.h"
+
 namespace __sanitizer {
 
+static constexpr u32 kStackSizeBits = 16;
+
+StackStore::Id StackStore::store(const StackTrace &trace) {
+  uptr *stack_trace = alloc(trace.size + 1);
+  CHECK_LT(trace.size, 1 << kStackSizeBits);
+  *stack_trace = trace.size + (trace.tag << kStackSizeBits);
+  internal_memcpy(stack_trace + 1, trace.trace, trace.size * sizeof(uptr));
+  return reinterpret_cast<StackStore::Id>(stack_trace);
+}
+
+StackTrace StackStore::load(Id id) {
+  const uptr *stack_trace = reinterpret_cast<const uptr *>(id);
+  uptr size = *stack_trace & ((1 << kStackSizeBits) - 1);
+  uptr tag = *stack_trace >> kStackSizeBits;
+  return StackTrace(stack_trace + 1, size, tag);
+}
+
 uptr *StackStore::tryAlloc(uptr count) {
   // Optimisic lock-free allocation, essentially try to bump the region ptr.
   for (;;) {

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
index a23af37..604e9fb 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
@@ -16,17 +16,22 @@
 #include "sanitizer_atomic.h"
 #include "sanitizer_internal_defs.h"
 #include "sanitizer_mutex.h"
+#include "sanitizer_stacktrace.h"
 
 namespace __sanitizer {
 
 class StackStore {
  public:
-  uptr *alloc(uptr count = 1);
+  using Id = uptr;
+
+  Id store(const StackTrace &trace);
+  StackTrace load(Id id);
   uptr allocated() const { return atomic_load_relaxed(&mapped_size); }
 
   void TestOnlyUnmap();
 
  private:
+  uptr *alloc(uptr count = 1);
   uptr *tryAlloc(uptr count);
   uptr *refillAndAlloc(uptr count);
   mutable StaticSpinMutex mtx;  // Protects alloc of new blocks.

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
index 65fd1f9..f108ae2 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
@@ -19,15 +19,12 @@
 
 namespace __sanitizer {
 
-static StackStore stackStore;
-
 struct StackDepotNode {
   using hash_type = u64;
   hash_type stack_hash;
   u32 link;
   static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20;
-  static const u32 kStackSizeBits = 16;
 
   typedef StackTrace args_type;
 
   bool eq(hash_type hash, const args_type &args) const {
@@ -50,14 +47,17 @@ struct StackDepotNode {
   typedef StackDepotHandle handle_type;
 };
 
+static StackStore stackStore;
+
 // FIXME(dvyukov): this single reserved bit is used in TSan.
 typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
     StackDepot;
 static StackDepot theDepot;
 // Keep rarely accessed stack traces out of frequently access nodes to improve
 // caching efficiency.
-static TwoLevelMap<uptr *, StackDepot::kNodesSize1, StackDepot::kNodesSize2>
-    tracePtrs;
+static TwoLevelMap<StackStore::Id, StackDepot::kNodesSize1,
+                   StackDepot::kNodesSize2>
+    storeIds;
 // Keep mutable data out of frequently access nodes to improve caching
 // efficiency.
 static TwoLevelMap<atomic_uint32_t, StackDepot::kNodesSize1,
@@ -96,21 +96,15 @@ uptr StackDepotNode::allocated() {
 }
 
 void StackDepotNode::store(u32 id, const args_type &args, hash_type hash) {
   stack_hash = hash;
-  uptr *stack_trace = stackStore.alloc(args.size + 1);
-  CHECK_LT(args.size, 1 << kStackSizeBits);
-  *stack_trace = args.size + (args.tag << kStackSizeBits);
-  internal_memcpy(stack_trace + 1, args.trace, args.size * sizeof(uptr));
-  tracePtrs[id] = stack_trace + 1;
+  storeIds[id] = stackStore.store(args);
 }
 
 StackDepotNode::args_type StackDepotNode::load(u32 id) const {
-  const uptr *stack_trace = tracePtrs[id];
-  if (!stack_trace)
-    return {};
-  uptr size = *stack_trace & ((1 << kStackSizeBits) - 1);
-  uptr tag = *stack_trace >> kStackSizeBits;
-  return args_type(stack_trace + 1, size, tag);
+  StackStore::Id store_id = storeIds[id];
+  if (!store_id)
+    return {};
+  return stackStore.load(store_id);
 }
 
 StackDepotStats StackDepotGetStats() { return theDepot.GetStats(); }
@@ -127,7 +121,7 @@ StackDepotHandle StackDepotNode::get_handle(u32 id) {
 
 void StackDepotTestOnlyUnmap() {
   theDepot.TestOnlyUnmap();
-  tracePtrs.TestOnlyUnmap();
+  storeIds.TestOnlyUnmap();
   stackStore.TestOnlyUnmap();
 }
 
-- 
2.7.4