// This file implements __sanitizer_print_memory_profile.
//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_stacktrace_printer.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
#include "lsan/lsan_common.h"
#include "asan/asan_allocator.h"
#if CAN_SANITIZE_LEAKS
+#include <stdarg.h>
+#include <errno.h>
+
+#if SANITIZER_LINUX && !SANITIZER_GO
+#include <sys/time.h>
+#include <time.h>
+#endif
+
namespace __asan {
+class HeapProfile; // Pointer to the global object
+HeapProfile *heap_profile = nullptr; // responsible for profiling
+
struct AllocationSite {
u32 id;
uptr total_size;
class HeapProfile {
public:
-  HeapProfile() : allocations_(1024) {}
+  // Constructs the profiler: remembers the flags, records the start
+  // timestamp (NanoTime(); used for relative "time" output in the header),
+  // and routes report output either to the global report_file (when
+  // heap_profile_log_path is empty) or to a private ReportFile opened at
+  // that path.
+  explicit HeapProfile(const CommonFlags *cf)
+    : allocations_(1024), hp_flags_(cf), hp_start_ts_(NanoTime()) {
+    if (internal_strcmp(hp_flags_->heap_profile_log_path, "") == 0) {
+      // HeapProfiler's report_file initialized to the global one
+      hp_report_file_ = &report_file;
+    }
+    else {
+      // ... or to the private one
+      hp_report_file_ = &hp_private_report_file_;
+      hp_report_file_->SetReportPath(hp_flags_->heap_profile_log_path);
+    }
+  }
+
+  // printf-style front end over HPVPrintf(); writes to the profiler's
+  // report file (global or private, chosen in the constructor).
+  void HPPrintf(const char *format, ...) {
+    va_list args;
+    va_start(args, format);
+    HPVPrintf(format, args);
+    va_end(args);
+  }
+
+  // Account one live allocation of |size| bytes attributed to stack depot
+  // id |id|. Records are appended as-is (count 1 each); no merging by id
+  // happens here.
  void Insert(u32 id, uptr size) {
    total_allocated_ += size;
    total_count_++;
    allocations_.push_back({id, size, 1});
  }
+  // Kind of profile output requested for the current call:
+  //   NONE  - nothing to print this time;
+  //   SHORT - header line only (prefixed with "SHORT ");
+  //   FULL  - header plus the full allocation dump.
+  enum class HPProfileType {
+    NONE,
+    SHORT,
+    FULL
+  };
+
+  // Decides which profile kind (if any) should be emitted now, based on
+  // RSS change and configured timeouts; updates the prev_* bookkeeping
+  // for whichever kind is chosen.
+  HPProfileType NeededProfileType() {
+    HPProfileType res = HPProfileType::NONE;
+    // The flag is converted to a ratio once and cached; function-local
+    // static, so later flag changes are not picked up.
+    static float lim_percent =
+      (float)(hp_flags_->heap_profile_out_full_lim) / 100;
+    uptr cur_rss = GetRSS();
+    u64 cur_time = NanoTime();
+
+    // Full profile initiated if and only if one of the following is true:
+    // * memory usage (Resident Set Size) has increased from the
+    //   previous time;
+    // * memory usage has decreased, and we're set to track it;
+    // * profile timeout has expired, and we're set to track it.
+    if ((cur_rss > prev_rss_ * (1 + lim_percent)) ||
+        (hp_flags_->heap_profile_out_decrease &&
+         (cur_rss < prev_rss_ * (1 - lim_percent))) ||
+        (hp_flags_->heap_profile_full_out_time &&
+         TimeoutHasExpired(HPProfileType::FULL, cur_time))) {
+      res = HPProfileType::FULL;
+      prev_rss_ = cur_rss;
+      prev_profile_ts_ = cur_time;
+      prev_short_profile_ts_ = cur_time;
+    }
+    // Short profile initiated if and only if profile timeout has expired,
+    // and we're set to track it.
+    else if (hp_flags_->heap_profile_short_out_time &&
+             TimeoutHasExpired(HPProfileType::SHORT, cur_time)) {
+      res = HPProfileType::SHORT;
+      prev_short_profile_ts_ = cur_time;
+    }
+
+    return res;
+  }
+
+  // Prints the one-line profile header for the given profile kind.
+  void HPPrintHeader(HPProfileType hp_prof_type) {
+    switch (hp_prof_type) {
+      case HPProfileType::NONE:
+        break;
+      case HPProfileType::SHORT:
+        HPPrintf("SHORT ");
+        // FALLTHROUGH: a short profile is the full header without the dump.
+        // NOTE(review): in the SHORT case prev_rss_ still holds the RSS
+        // recorded at the last FULL profile (NeededProfileType() refreshes
+        // it only for FULL) -- confirm the stale value is intended here.
+      case HPProfileType::FULL:
+        // prev_rss_ already set to current RSS
+        HPPrintf("HEAP PROFILE at RSS %zd%s",
+                 hp_flags_->heap_profile_rss_mb ? prev_rss_ >> 20 : prev_rss_,
+                 hp_flags_->heap_profile_rss_mb ? "Mb" : "b");
+        if (hp_flags_->heap_profile_timestamp) {
+          // print timestamps since creation of HeapProfile object.
+          // prev_short_profile_ts_ set to current time in either case
+          u64 nsecs_since_start = prev_short_profile_ts_ - hp_start_ts_;
+          u64 ts_sec = nsecs_since_start / (1000 * 1000 * 1000);
+          // Microseconds remainder, zero-padded to 6 digits below.
+          u64 ts_usec = (nsecs_since_start % (1000 * 1000 * 1000)) / 1000;
+          HPPrintf(" time %llu.%06llu", ts_sec, ts_usec);
+        }
+        HPPrintf("\n");
+        break;
+    }
+  }
+
+  // Prints the blank-line separator that closes a profile report.
+  void HPPrintFooter(HPProfileType hp_prof_type) {
+    switch (hp_prof_type) {
+      case HPProfileType::SHORT:
+      // FALLTHROUGH: SHORT and FULL get the same trailing separator.
+      case HPProfileType::FULL:
+        HPPrintf("\n\n");
+        // FALLTHROUGH
+      case HPProfileType::NONE:
+        break;
+    }
+  }
+
+  // Drops all allocation records accumulated during the previous profile
+  // pass so the next StopTheWorld heap walk starts from a clean slate.
+  // (Stray ';' after the body removed: it trips -Wextra-semi in -Werror
+  // builds.)
+  void HPClearAllocPool() {
+    total_count_ = 0;
+    total_allocated_ = 0;
+    allocations_.clear();
+  }
+
+
+ private:
+  // Basically same as __sanitizer::StackTrace::Print(),
+  // but with HPPrintf() calls.
+  // Symbolizes every PC of |stack_trace|, renders each frame with the
+  // user-configured stack_trace_format, and optionally emits a
+  // DEDUP_TOKEN built from the first dedup_token_length frame names.
+  void PrintStackTrace(StackTrace stack_trace) {
+    if (stack_trace.trace == nullptr || stack_trace.size == 0) {
+      HPPrintf("  <empty stack>\n\n");
+      return;
+    }
+
+    InternalScopedString frame_desc(GetPageSizeCached() * 2);
+    InternalScopedString dedup_token(GetPageSizeCached());
+    int dedup_frames = common_flags()->dedup_token_length;
+    uptr frame_num = 0;
+
+    for (uptr i = 0; i < stack_trace.size && stack_trace.trace[i]; i++) {
+      // PCs in stack traces are actually the return addresses, that is,
+      // addresses of the next instructions after the call.
+      uptr pc = StackTrace::GetPreviousInstructionPc(stack_trace.trace[i]);
+      SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(pc);
+      CHECK(frames);
+
+      // One PC may expand into several frames when calls were inlined.
+      for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
+        frame_desc.clear();
+        RenderFrame(&frame_desc, common_flags()->stack_trace_format,
+                    frame_num++, cur->info, common_flags()->symbolize_vs_style,
+                    common_flags()->strip_path_prefix);
+        HPPrintf("%s\n", frame_desc.data());
+
+        if (dedup_frames-- > 0) {
+          if (dedup_token.length())
+            dedup_token.append("--");
+          dedup_token.append(cur->info.function);
+        }
+      }
+
+      // Release the symbolizer-allocated frame list.
+      frames->ClearAll();
+    }
+
+    // Always print a trailing empty line after stack trace.
+    HPPrintf("\n");
+    if (dedup_token.length())
+      HPPrintf("DEDUP_TOKEN: %s\n", dedup_token.data());
+  }
+
+
+ public:
+  // Sorts recorded allocation sites by size (descending) and prints them,
+  // largest first, until |top_percent| of the total live bytes is shown.
  void Print(uptr top_percent) {
    InternalSort(&allocations_, allocations_.size(),
                 [](const AllocationSite &a, const AllocationSite &b) {
                   return a.total_size > b.total_size;
                 });
-    CHECK(total_allocated_);
-    uptr total_shown = 0;
-    Printf("Live Heap Allocations: %zd bytes from %zd allocations; "
+    HPPrintf("Live Heap Allocations: %zd bytes from %zd allocations; "
           "showing top %zd%%\n", total_allocated_, total_count_, top_percent);
+    // An empty pool is no longer a CHECK failure: print the summary line
+    // and return early, which also guards the divisions by
+    // total_allocated_ below.
+    if (UNLIKELY(!total_allocated_)) {
+      return;
+    }
+    uptr total_shown = 0;
    for (uptr i = 0; i < allocations_.size(); i++) {
      auto &a = allocations_[i];
-      Printf("%zd byte(s) (%zd%%) in %zd allocation(s)\n", a.total_size,
-             a.total_size * 100 / total_allocated_, a.count);
-      StackDepotGet(a.id).Print();
+      HPPrintf("%zd byte(s) (%zd%%) in %zd allocation(s)\n",
+               a.total_size, a.total_size * 100 / total_allocated_, a.count);
+      PrintStackTrace(StackDepotGet(a.id));
      total_shown += a.total_size;
      if (total_shown * 100 / total_allocated_ > top_percent)
        break;
uptr total_allocated_ = 0;
uptr total_count_ = 0;
InternalMmapVector<AllocationSite> allocations_;
+
+  // Flags the profiler was constructed with (non-owning pointer).
+  const CommonFlags *hp_flags_;
+
+  // Report destination: hp_report_file_ points either at the global
+  // report_file or at hp_private_report_file_ (selected in the ctor).
+  SpinMutex hp_report_file_mu_;
+  ReportFile hp_private_report_file_ = {&hp_report_file_mu_,
+                                        kStderrFd, "", "", 0};
+  ReportFile *hp_report_file_;
+
+  // Keep track of previous profile values.
+  // All *_ts_ fields hold NanoTime() values (nanoseconds).
+  uptr prev_rss_ = 0;
+  u64 prev_profile_ts_ = 0;
+  u64 prev_short_profile_ts_ = 0;
+  u64 hp_start_ts_;
+
+  // Writes a NUL-terminated buffer verbatim to the profiler's report file.
+  void HPRawWrite(const char *buffer) {
+    hp_report_file_->Write(buffer, internal_strlen(buffer));
+  }
+
+  // A helper function dumping arbitrary sized buffer to report file.
+  // Basically same as __sanitizer::SharedPrintfCode: format into a small
+  // stack buffer first; if the result does not fit, redo the formatting
+  // into an mmap-ed 16K buffer.
+  void HPVPrintf(const char *format, va_list args) {
+    // |args2| preserves the argument list so formatting can be restarted
+    // on the second (mmap) pass.
+    va_list args2;
+    va_copy(args2, args);
+    const int kLen = 16 * 1024;
+
+    // |local_buffer| is small enough not to overflow the stack and/or violate
+    // the stack limit enforced by TSan (-Wframe-larger-than=512). On the other
+    // hand, the bigger the buffer is, the more the chance the error report will
+    // fit into it.
+    char local_buffer[400];
+
+    int needed_length;
+    char *buffer = local_buffer;
+    int buffer_size = ARRAY_SIZE(local_buffer);
+
+    // First try to print a message using a local buffer, and then fall back to
+    // mmaped buffer.
+    for (int use_mmap = 0; use_mmap < 2; use_mmap++) {
+      if (use_mmap) {
+        // Rewind |args| back to the saved copy before retrying.
+        va_end(args);
+        va_copy(args, args2);
+        buffer = (char*)MmapOrDie(kLen, "HPVPrintf");
+        buffer_size = kLen;
+      }
+
+      needed_length = 0;
+      needed_length += internal_vsnprintf(buffer + needed_length,
+                                          buffer_size - needed_length,
+                                          format, args);
+
+      // Check that data fits into the current buffer.
+      // If it does not, report and die.
+      // If it does, just print it.
+      if (needed_length >= buffer_size) {
+        if (!use_mmap) continue;
+        RAW_CHECK_MSG(needed_length < kLen,
+                      "Buffer in Report is too short!\n");
+      }
+      break;
+    }
+
+    HPRawWrite(buffer);
+
+    // If we had mapped any memory, clean it up.
+    if (buffer != local_buffer)
+      UnmapOrDie((void *)buffer, buffer_size);
+    va_end(args2);
+  }
+
+  // Returns true if the configured timeout for the given profile kind has
+  // expired since the last profile of that kind was printed. |cur_time|
+  // comes from NanoTime(); the flag values are milliseconds.
+  bool TimeoutHasExpired(HPProfileType hp_prof_type, u64 cur_time) {
+    // Zero-initialize so no path can read the locals before assignment
+    // (-Wsometimes-uninitialized / -Wmaybe-uninitialized in -Werror
+    // builds flagged the old 'default: break;' fallthrough).
+    u64 timeout_msecs = 0;
+    u64 prev_ts = 0;
+
+    switch (hp_prof_type) {
+      case HPProfileType::FULL:
+        timeout_msecs = hp_flags_->heap_profile_full_out_time;
+        prev_ts = prev_profile_ts_;
+        break;
+
+      case HPProfileType::SHORT:
+        timeout_msecs = hp_flags_->heap_profile_short_out_time;
+        prev_ts = prev_short_profile_ts_;
+        break;
+
+      default:
+        // NONE (or any future kind) never expires.
+        return false;
+    }
+
+    // Convert milliseconds to nanoseconds to match NanoTime().
+    u64 threshold_ts = prev_ts + timeout_msecs * 1000 * 1000;
+    return cur_time >= threshold_ts;
+  }
};
+// __lsan::ForEachChunk callback: records each live allocation that has a
+// stack-depot id into the global heap_profile. |arg| is unused now that
+// the profile object is global (the callback signature is fixed).
static void ChunkCallback(uptr chunk, void *arg) {
-  HeapProfile *hp = reinterpret_cast<HeapProfile*>(arg);
  AsanChunkView cv = FindHeapChunkByAllocBeg(chunk);
  if (!cv.IsAllocated()) return;
  u32 id = cv.GetAllocStackId();
  if (!id) return;
-  hp->Insert(id, cv.UsedSize());
+  heap_profile->Insert(id, cv.UsedSize());
}
+// StopTheWorld callback: with all threads suspended, rebuilds the
+// allocation pool from LSan's chunk registry and prints the profile.
+// |argument| carries top_percent; |suspended_threads_list| is unused.
static void MemoryProfileCB(const SuspendedThreadsList &suspended_threads_list,
                            void *argument) {
-  HeapProfile hp;
-  __lsan::ForEachChunk(ChunkCallback, &hp);
-  hp.Print(reinterpret_cast<uptr>(argument));
+  // Reuse the global profiler object instead of a per-call local.
+  heap_profile->HPClearAllocPool();
+  // No need to pass anything to callback, so just pass null
+  __lsan::ForEachChunk(ChunkCallback, nullptr);
+  heap_profile->Print(reinterpret_cast<uptr>(argument));
}
+// Static buffer to be used in placement new
+static unsigned char heap_profile_buf[sizeof(HeapProfile)];
+
} // namespace __asan
+#endif // CAN_SANITIZE_LEAKS
+
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
+// Entry point: lazily constructs the global HeapProfile, decides which
+// profile kind is due, and stops the world for a FULL dump only.
+// |top_percent| bounds how much of the live heap the dump shows.
void __sanitizer_print_memory_profile(uptr top_percent) {
-  __sanitizer::StopTheWorld(__asan::MemoryProfileCB, (void*)top_percent);
+#if CAN_SANITIZE_LEAKS
+  // If this function is ever called, then the global
+  // heap_profile object is guaranteed to be used.
+  // NOTE(review): this lazy placement-new init is unguarded; two threads
+  // making the first call concurrently could construct the object twice --
+  // confirm callers serialize, or add a mutex/atomic guard.
+  if (UNLIKELY(__asan::heap_profile == nullptr)) {
+    __asan::heap_profile = new(__asan::heap_profile_buf)
+        __asan::HeapProfile(__sanitizer::common_flags());
+  }
+
+  __asan::HeapProfile::HPProfileType hp_prof_type =
+      __asan::heap_profile->NeededProfileType();
+  __asan::heap_profile->HPPrintHeader(hp_prof_type);
+  // Only a FULL profile needs to stop the world and walk the heap;
+  // SHORT/NONE print at most the header and footer.
+  if (hp_prof_type == __asan::HeapProfile::HPProfileType::FULL) {
+    __sanitizer::StopTheWorld(__asan::MemoryProfileCB, (void*)top_percent);
+  }
+  __asan::heap_profile->HPPrintFooter(hp_prof_type);
+#else
+  // Silence -Wunused-parameter (-Werror builds) when leak detection --
+  // and with it the profiler -- is compiled out.
+  (void)top_percent;
+#endif
}
} // extern "C"
-
-#endif // CAN_SANITIZE_LEAKS
" until the RSS goes below the soft limit."
" This limit does not affect memory allocations other than"
" malloc/new.")
+// Heap-profiler flags; consumed by asan's memory profiler
+// (see __sanitizer_print_memory_profile).
-COMMON_FLAG(bool, heap_profile, false, "Experimental heap profiler, asan-only")
+COMMON_FLAG(bool, heap_profile, false,
+            "Experimental. Enables heap profiler (asan-only).")
+COMMON_FLAG(bool, heap_profile_timestamp, false,
+            "If set, prints timestamps along with heap profiler data.")
+COMMON_FLAG(int, heap_profile_out_full_lim, 10,
+            "Threshold value (in percentages) of RSS growth which"
+            " triggers printing memory usage by heap profiler.")
+COMMON_FLAG(bool, heap_profile_out_decrease, false,
+            "If set, prints memory profile on memory usage decrease.")
+COMMON_FLAG(uptr, heap_profile_full_out_time, 0,
+            "If non-zero, sets timeout (in milliseconds) for unconditional"
+            " printing of memory usage data by heap profiler; otherwise"
+            " there's no timeout.")
+COMMON_FLAG(uptr, heap_profile_short_out_time, 0,
+            "If non-zero, sets timeout (in milliseconds) for unconditional"
+            " printing of short memory usage data by heap profiler;"
+            " otherwise there's no timeout. This should be less than"
+            " heap_profile_full_out_time or no short output will be produced,"
+            " as printing full memory profile has higher precedence.")
+COMMON_FLAG(int, heap_profile_top_percent, 90,
+            "Sets threshold (in percentages) for memory allocations to be"
+            " shown, from largest to smallest.")
+COMMON_FLAG(bool, heap_profile_rss_mb, true,
+            "If true, prints RSS memory usage in MBs instead of bytes.")
+COMMON_FLAG(const char *, heap_profile_log_path, "",
+            "If set, uses separate file to write heap profile reports.")
COMMON_FLAG(s32, allocator_release_to_os_interval_ms, kReleaseToOSIntervalNever,
"Experimental. Only affects a 64-bit allocator. If set, tries to "
"release unused memory to the OS, but not more often than this "