From: Maria Guseva
Date: Fri, 18 May 2018 16:07:01 +0000 (+0300)
Subject: Introduce the "managed mode" to disable slow native backtraces
X-Git-Tag: submit/tizen/20180620.112952^2~1
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=d69ffe2000e6f84f17190f0dc95d1ebf43fd1a1d;p=sdk%2Ftools%2Fheaptrack.git

Introduce the "managed mode" to disable slow native backtraces
---

diff --git a/src/track/libheaptrack.cpp b/src/track/libheaptrack.cpp
index 84901a1..99f6499 100644
--- a/src/track/libheaptrack.cpp
+++ b/src/track/libheaptrack.cpp
@@ -252,6 +252,10 @@ public:
             debugLog("%s", "calling initAfterCallback done");
         }
 
+        // initialize managed mode
+        // TODO: make it user-defined, e.g. via environment variable
+        is_managed_mode = true;
+
         debugLog("%s", "initialization done");
     }
 
@@ -302,7 +306,7 @@ public:
     void writeSMAPS(HeapTrack &heaptrack)
     {
-        if (!s_data || !s_data->out || !s_data->procSmaps) {
+        if (is_managed_mode || !s_data || !s_data->out || !s_data->procSmaps) {
             return;
         }
 
@@ -559,6 +563,11 @@ public:
         TraceTree::knownNames.insert(classId);
     }
 
+    static bool isUnmanagedTraceNeeded()
+    {
+        return !is_managed_mode;
+    }
+
 private:
     static int dl_iterate_phdr_callback(struct dl_phdr_info* info, size_t /*size*/, void* data)
     {
@@ -813,11 +822,14 @@ private:
     static LockedData* s_data;
     static size_t k_pageSize;
+    static bool is_managed_mode;
 };
 
 atomic<bool> HeapTrack::s_locked{false};
 HeapTrack::LockedData* HeapTrack::s_data{nullptr};
 size_t HeapTrack::k_pageSize{0u};
+bool HeapTrack::is_managed_mode{false};
+
 }
 
 extern "C" {
@@ -863,7 +875,8 @@ void heaptrack_dlopen(const vector>> &newMm
     {
         trace.fill(dlopenOriginal);
     } else {
-        trace.fill(2);
+        if (HeapTrack::isUnmanagedTraceNeeded())
+            trace.fill(2);
     }
 
     HeapTrack heaptrack(guard);
@@ -897,7 +910,8 @@ void heaptrack_malloc(void* ptr, size_t size)
     debugLog("heaptrack_malloc(%p, %zu)", ptr, size);
 
     Trace trace;
-    trace.fill(2);
+    if (HeapTrack::isUnmanagedTraceNeeded())
+        trace.fill(2);
 
     HeapTrack heaptrack(guard);
     heaptrack.handleMalloc(ptr, size, trace);
@@ -925,7 +939,8 @@ void heaptrack_realloc(void* ptr_in, size_t size, void* ptr_out)
     debugLog("heaptrack_realloc(%p, %zu, %p)", ptr_in, size, ptr_out);
 
     Trace trace;
-    trace.fill(2);
+    if (HeapTrack::isUnmanagedTraceNeeded())
+        trace.fill(2);
 
     HeapTrack heaptrack(guard);
     if (ptr_in) {
@@ -945,7 +960,8 @@ void heaptrack_mmap(void* ptr, size_t length, int prot, int flags, int fd, off64
              ptr, length, prot, flags, fd, offset);
 
     Trace trace;
-    trace.fill(2);
+    if (HeapTrack::isUnmanagedTraceNeeded())
+        trace.fill(2);
 
     HeapTrack heaptrack(guard);
     heaptrack.handleMmap(ptr, length, prot, 0, fd, trace);
@@ -972,7 +988,8 @@ void heaptrack_objectallocate(void *objectId, unsigned long objectSize) {
     debugLog("handleObjectAllocation: %p %lu", objectId, objectSize);
 
     Trace trace;
-    trace.fill(2);
+    if (HeapTrack::isUnmanagedTraceNeeded())
+        trace.fill(2);
 
     HeapTrack heaptrack(guard);
     heaptrack.handleObjectAllocation(objectId, objectSize, trace);
diff --git a/src/track/tracetree.h b/src/track/tracetree.h
index e80d40b..81e9df7 100644
--- a/src/track/tracetree.h
+++ b/src/track/tracetree.h
@@ -90,12 +90,12 @@ public:
         StackEntry *stackIter = g_shadowStack;
         if (stackIter != nullptr) {
-            void* managedStack[Trace::MAX_SIZE];
+            void* managedStack[MANAGED_MAX_SIZE];
             int managedStackSize = 0;
 
             handleIP((void *) (uintptr_t) -1, false);
 
-            while (stackIter != nullptr && managedStackSize < Trace::MAX_SIZE) {
+            while (stackIter != nullptr && managedStackSize < MANAGED_MAX_SIZE) {
                 void *ip = reinterpret_cast<void *>(stackIter->m_funcId);
                 if (knownNames.find(ip) == knownNames.end()) {
@@ -143,6 +143,10 @@ public:
 private:
     TraceEdge m_root = {0, 0, {}};
     uint32_t m_index = 1;
+    enum : int
+    {
+        MANAGED_MAX_SIZE = 64
+    };
 };
 
 #endif // TRACETREE_H
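
A note on the TODO in the first hunk: the patch hard-codes is_managed_mode = true at
initialization, so managed mode is always on. A minimal sketch of the environment-variable
override the TODO asks for follows; the variable name HEAPTRACK_MANAGED_MODE and the
readManagedModeFromEnv() helper are hypothetical, not part of this commit:

    #include <cstdlib>
    #include <cstring>

    // Hypothetical helper, not part of this patch: derive the managed-mode
    // flag from the environment instead of hard-coding it, as the TODO
    // suggests. An unset variable or the value "0" leaves managed mode off,
    // i.e. slow native backtraces stay enabled.
    static bool readManagedModeFromEnv()
    {
        const char* value = std::getenv("HEAPTRACK_MANAGED_MODE");
        return value != nullptr && std::strcmp(value, "0") != 0;
    }

With such a helper, the hard-coded assignment in the init path would become
is_managed_mode = readManagedModeFromEnv(), letting users opt back into native
backtraces without rebuilding the tracker.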