1 // Copyright (c) 2006, Google Inc.
2 // All rights reserved.
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
8 // * Redistributions of source code must retain the above copyright
9 // notice, this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above
11 // copyright notice, this list of conditions and the following disclaimer
12 // in the documentation and/or other materials provided with the
14 // * Neither the name of Google Inc. nor the names of its
15 // contributors may be used to endorse or promote products derived from
16 // this software without specific prior written permission.
18 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 // Author: Sanjay Ghemawat
32 // Maxim Lifantsev (refactoring)
38 #include <unistd.h> // for write()
40 #include <fcntl.h> // for open()
43 #ifndef GLOB_NOMATCH // true on some old cygwins
44 # define GLOB_NOMATCH 0
47 #ifdef HAVE_INTTYPES_H
48 #include <inttypes.h> // for PRIxPTR
57 #include <algorithm> // for sort(), equal(), and copy()
59 #include "heap-profile-table.h"
61 #include "base/logging.h"
62 #include "raw_printer.h"
63 #include "symbolize.h"
64 #include <gperftools/stacktrace.h>
65 #include <gperftools/malloc_hook.h>
66 #include "memory_region_map.h"
67 #include "base/commandlineflags.h"
68 #include "base/logging.h" // for the RawFD I/O commands
69 #include "base/sysinfo.h"
77 using tcmalloc::FillProcSelfMaps; // from sysinfo.h
78 using tcmalloc::DumpProcSelfMaps; // from sysinfo.h
80 //----------------------------------------------------------------------
// Profiler configuration flags; defaults may be overridden through the
// named environment variables (HEAP_PROFILE_CLEANUP, HEAP_CHECK_MAX_LEAKS).
82 DEFINE_bool(cleanup_old_heap_profiles,
83 EnvToBool("HEAP_PROFILE_CLEANUP", true),
84 "At initialization time, delete old heap profiles.");
// Caps how many leak records ReportLeaks() prints; <= 0 means "no cap".
86 DEFINE_int32(heap_check_max_leaks,
87 EnvToInt("HEAP_CHECK_MAX_LEAKS", 20),
88 "The maximum number of leak reports to print.");
90 //----------------------------------------------------------------------
92 // header of the dumped heap profile
93 static const char kProfileHeader[] = "heap profile: ";
// Section separator written before the /proc/self/maps dump in a profile.
94 static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n";
95 #if defined(TYPE_PROFILING)
96 static const char kTypeProfileStatsHeader[] = "type statistics:\n";
97 #endif // defined(TYPE_PROFILING)
99 //----------------------------------------------------------------------
// Extension appended to every dumped profile file name.
101 const char HeapProfileTable::kFileExt[] = ".heap";
103 //----------------------------------------------------------------------
105 static const int kHashTableSize = 179999; // Size for bucket_table_.
// Out-of-class definition for the class-scope constant (pre-C++17 ODR rule).
106 /*static*/ const int HeapProfileTable::kMaxStackDepth;
108 //----------------------------------------------------------------------
110 // We strip out different number of stack frames in debug mode
111 // because less inlining happens in that case
113 static const int kStripFrames = 2;
// NOTE(review): the #if/#else/#endif guards selecting between these two
// kStripFrames definitions (debug vs. release) appear elided in this view;
// as written the two definitions would conflict — verify against upstream.
115 static const int kStripFrames = 3;
118 // For sorting Stats or Buckets by in-use space
// Strict-weak-ordering comparator used with sort(): orders by decreasing
// in-use bytes (alloc_size - free_size).  Accepts Stats* so it also works
// for Bucket* (Bucket derives its stats fields from Stats).
119 static bool ByAllocatedSpace(HeapProfileTable::Stats* a,
120 HeapProfileTable::Stats* b) {
121 // Return true iff "a" has more allocated space than "b"
122 return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size);
125 //----------------------------------------------------------------------
// Constructor: allocates and zeroes the bucket hash table, placement-news
// the allocation map, and zeroes the running totals.  All storage comes
// from the injected Allocator so the profiler avoids the normal heap.
// NOTE(review): the rest of the parameter list, the member-initializer
// list, and the "address_map_ =" assignment appear elided in this view.
127 HeapProfileTable::HeapProfileTable(Allocator alloc,
133 profile_mmap_(profile_mmap),
136 // Make a hash table for buckets.
137 const int table_bytes = kHashTableSize * sizeof(*bucket_table_);
138 bucket_table_ = static_cast<Bucket**>(alloc_(table_bytes));
139 memset(bucket_table_, 0, table_bytes);
141 // Make an allocation map.
143 new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_);
// Zero all Stats counters (allocs/frees/alloc_size/free_size).
146 memset(&total_, 0, sizeof(total_));
// Destructor: releases everything the constructor and GetBucket() acquired,
// in reverse order, through dealloc_.  The map was placement-new'd, so it
// is destroyed explicitly before its storage is returned.
150 HeapProfileTable::~HeapProfileTable() {
151 // Free the allocation map.
152 address_map_->~AllocationMap();
153 dealloc_(address_map_);
156 // Free the hash table.
157 for (int i = 0; i < kHashTableSize; i++) {
158 for (Bucket* curr = bucket_table_[i]; curr != 0; /**/) {
159 Bucket* bucket = curr;
// NOTE(review): the loop-advance ("curr = curr->next") and the
// "dealloc_(bucket)" line appear elided in this view — verify upstream.
161 dealloc_(bucket->stack);
165 dealloc_(bucket_table_);
166 bucket_table_ = NULL;
// Finds the bucket for the given call stack, creating one if none exists.
// The stack is hashed, looked up by (hash, depth, exact frame match), and
// on a miss a deep copy of the stack plus a zeroed Bucket is allocated via
// alloc_ and chained onto the hash bucket's list.
169 HeapProfileTable::Bucket* HeapProfileTable::GetBucket(int depth,
170 const void* const key[]) {
// NOTE(review): the declaration of hash accumulator "h" and its final
// mixing steps appear elided in this view.
173 for (int i = 0; i < depth; i++) {
174 h += reinterpret_cast<uintptr_t>(key[i]);
181 // Lookup stack trace in table
182 unsigned int buck = ((unsigned int) h) % kHashTableSize;
183 for (Bucket* b = bucket_table_[buck]; b != 0; b = b->next) {
184 if ((b->hash == h) &&
185 (b->depth == depth) &&
186 equal(key, key + depth, b->stack)) {
// Create a new bucket: copy the caller's stack into profiler-owned storage.
192 const size_t key_size = sizeof(key[0]) * depth;
193 const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size));
194 copy(key, key + depth, kcopy);
195 Bucket* b = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket)));
196 memset(b, 0, sizeof(*b));
// NOTE(review): assignments of b->hash/b->depth/b->stack and the
// num_buckets_ increment appear elided between these lines.
200 b->next = bucket_table_[buck];
201 bucket_table_[buck] = b;
// Captures the caller's stack into "stack" (up to kMaxStackDepth frames),
// skipping this frame (+1), kStripFrames profiler-internal frames, and
// "skip_count" caller-requested frames.  Returns the number of frames.
206 int HeapProfileTable::GetCallerStackTrace(
207 int skip_count, void* stack[kMaxStackDepth]) {
208 return MallocHook::GetCallerStackTrace(
209 stack, kMaxStackDepth, kStripFrames + skip_count + 1);
// Records an allocation of "bytes" at "ptr" with the given call stack:
// charges the bucket and the grand totals, then maps ptr -> AllocValue.
212 void HeapProfileTable::RecordAlloc(
213 const void* ptr, size_t bytes, int stack_depth,
214 const void* const call_stack[]) {
215 Bucket* b = GetBucket(stack_depth, call_stack);
// NOTE(review): the b->allocs++ / total_.allocs++ increments and the
// AllocValue declaration with v.bytes assignment appear elided here.
217 b->alloc_size += bytes;
219 total_.alloc_size += bytes;
222 v.set_bucket(b); // also did set_live(false); set_ignore(false)
224 address_map_->Insert(ptr, v);
// Records a free of "ptr" if it is tracked: removes it from the address
// map and credits the freed bytes back to its bucket and the totals.
// Unknown pointers are ignored (FindAndRemove returns false).
227 void HeapProfileTable::RecordFree(const void* ptr) {
// NOTE(review): the "AllocValue v;" declaration and the b->frees++ /
// total_.frees++ increments appear elided in this view.
229 if (address_map_->FindAndRemove(ptr, &v)) {
230 Bucket* b = v.bucket();
232 b->free_size += v.bytes;
234 total_.free_size += v.bytes;
// Looks up a tracked allocation at exactly "ptr".  On success stores its
// size into *object_size and returns true; otherwise returns false and
// leaves *object_size untouched.
238 bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const {
239 const AllocValue* alloc_value = address_map_->Find(ptr);
240 if (alloc_value != NULL) *object_size = alloc_value->bytes;
241 return alloc_value != NULL;
// Like FindAlloc(), but also fills in the allocation's call stack (a
// pointer into the bucket's profiler-owned stack copy) and depth.
// Returns false without touching *info if ptr is not tracked.
244 bool HeapProfileTable::FindAllocDetails(const void* ptr,
245 AllocInfo* info) const {
246 const AllocValue* alloc_value = address_map_->Find(ptr);
247 if (alloc_value != NULL) {
248 info->object_size = alloc_value->bytes;
249 info->call_stack = alloc_value->bucket()->stack;
250 info->stack_depth = alloc_value->bucket()->depth;
252 return alloc_value != NULL;
// Variant of FindAlloc() for interior pointers: finds the tracked object
// (of size <= max_size) that contains "ptr", reporting its start address
// via *object_ptr and size via *object_size.  Returns false if none.
255 bool HeapProfileTable::FindInsideAlloc(const void* ptr,
257 const void** object_ptr,
258 size_t* object_size) const {
259 const AllocValue* alloc_value =
260 address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr);
261 if (alloc_value != NULL) *object_size = alloc_value->bytes;
262 return alloc_value != NULL;
// Marks the tracked allocation at "ptr" live for leak checking.  Returns
// true only on a transition from not-live to live (used so each object is
// counted once); false if ptr is untracked or already live.
265 bool HeapProfileTable::MarkAsLive(const void* ptr) {
266 AllocValue* alloc = address_map_->FindMutable(ptr);
267 if (alloc && !alloc->live()) {
268 alloc->set_live(true);
// Flags the tracked allocation at "ptr" to be ignored by leak reporting.
274 void HeapProfileTable::MarkAsIgnored(const void* ptr) {
275 AllocValue* alloc = address_map_->FindMutable(ptr);
// NOTE(review): the "if (alloc)" null guard appears elided in this view;
// as shown, an untracked ptr would dereference NULL — verify upstream.
277 alloc->set_ignore(true);
// Invokes callback "f" (with the opaque "data" cookie) once per tracked
// allocation address, by walking the whole address map.
281 void HeapProfileTable::IterateAllocationAddresses(AddressIterator f,
283 const AllocationAddressIteratorArgs args(f, data);
284 address_map_->Iterate<const AllocationAddressIteratorArgs&>(
285 AllocationAddressesIterator, args);
// Stamps "mark" onto every currently tracked allocation, unconditionally
// (mark_all == true makes MarkIterator overwrite any existing mark).
288 void HeapProfileTable::MarkCurrentAllocations(AllocationMark mark) {
289 const MarkArgs args(mark, true);
290 address_map_->Iterate<const MarkArgs&>(MarkIterator, args);
293 void HeapProfileTable::MarkUnmarkedAllocations(AllocationMark mark) {
294 const MarkArgs args(mark, true);
295 address_map_->Iterate<const MarkArgs&>(MarkIterator, args);
298 // We'd be happier using an snprintf-style helper library, but we avoid
// one to reduce dependencies.
// Appends a text rendering of bucket "b" (counts, sizes, then the hex
// stack frames) to buf at offset buflen, never exceeding bufsize.  Also
// accumulates b's counters into *profile_stats when non-NULL.  Returns the
// new length of buf; on any snprintf failure/truncation the partial write
// is abandoned and the pre-call buflen is returned.
299 int HeapProfileTable::UnparseBucket(const Bucket& b,
300 char* buf, int buflen, int bufsize,
302 Stats* profile_stats) {
303 if (profile_stats != NULL) {
304 profile_stats->allocs += b.allocs;
305 profile_stats->alloc_size += b.alloc_size;
306 profile_stats->frees += b.frees;
307 profile_stats->free_size += b.free_size;
// NOTE(review): the "int printed =" binding for this first snprintf and
// some of its format arguments (in-use counts, the extra label) appear
// elided in this view.
310 snprintf(buf + buflen, bufsize - buflen,
311 "%6d: %8" PRId64 " [%6d: %8" PRId64 "] @%s",
313 b.alloc_size - b.free_size,
317 // If it looks like the snprintf failed, ignore the fact we printed anything
318 if (printed < 0 || printed >= bufsize - buflen) return buflen;
320 for (int d = 0; d < b.depth; d++) {
321 printed = snprintf(buf + buflen, bufsize - buflen, " 0x%08" PRIxPTR,
322 reinterpret_cast<uintptr_t>(b.stack[d]));
323 if (printed < 0 || printed >= bufsize - buflen) return buflen;
326 printed = snprintf(buf + buflen, bufsize - buflen, "\n");
327 if (printed < 0 || printed >= bufsize - buflen) return buflen;
332 HeapProfileTable::Bucket**
333 HeapProfileTable::MakeSortedBucketList() const {
334 Bucket** list = static_cast<Bucket**>(alloc_(sizeof(Bucket) * num_buckets_));
336 int bucket_count = 0;
337 for (int i = 0; i < kHashTableSize; i++) {
338 for (Bucket* curr = bucket_table_[i]; curr != 0; curr = curr->next) {
339 list[bucket_count++] = curr;
342 RAW_DCHECK(bucket_count == num_buckets_, "");
344 sort(list, list + num_buckets_, ByAllocatedSpace);
// Writes every allocation carrying "mark" to file_name, one record per
// object, via DumpMarkedIterator.  Logs and bails out if the file cannot
// be opened.
349 void HeapProfileTable::DumpMarkedObjects(AllocationMark mark,
350 const char* file_name) {
351 RawFD fd = RawOpenForWriting(file_name);
352 if (fd == kIllegalRawFD) {
353 RAW_LOG(ERROR, "Failed dumping live objects to %s", file_name);
// NOTE(review): the early "return;" after the error log and the trailing
// RawClose(fd) appear elided in this view.
356 const DumpMarkedArgs args(fd, mark);
357 address_map_->Iterate<const DumpMarkedArgs&>(DumpMarkedIterator, args);
361 #if defined(TYPE_PROFILING)
// Aggregates live allocations by their runtime type (via LookupType) into
// a temporary AddressMap keyed by type name, then writes a per-type
// object-count/byte-count report to file_name.  Only compiled in when
// TYPE_PROFILING is enabled.
362 void HeapProfileTable::DumpTypeStatistics(const char* file_name) const {
363 RawFD fd = RawOpenForWriting(file_name);
364 if (fd == kIllegalRawFD) {
365 RAW_LOG(ERROR, "Failed dumping type statistics to %s", file_name);
// NOTE(review): the early "return;" after the error log appears elided.
// Temporary map is placement-new'd from alloc_ so this path stays off the
// normal heap, matching the rest of the profiler.
369 AddressMap<TypeCount>* type_size_map;
370 type_size_map = new(alloc_(sizeof(AddressMap<TypeCount>)))
371 AddressMap<TypeCount>(alloc_, dealloc_);
372 address_map_->Iterate(TallyTypesItererator, type_size_map);
374 RawWrite(fd, kTypeProfileStatsHeader, strlen(kTypeProfileStatsHeader));
375 const DumpArgs args(fd, NULL);
376 type_size_map->Iterate<const DumpArgs&>(DumpTypesIterator, args);
// Explicit destructor call pairs with the placement new above.
379 type_size_map->~AddressMap<TypeCount>();
380 dealloc_(type_size_map);
382 #endif // defined(TYPE_PROFILING)
// Invokes "callback" once per bucket, in decreasing order of in-use space,
// filling an AllocContextInfo from each bucket's stats and stack.
384 void HeapProfileTable::IterateOrderedAllocContexts(
385 AllocContextIterator callback) const {
386 Bucket** list = MakeSortedBucketList();
387 AllocContextInfo info;
388 for (int i = 0; i < num_buckets_; ++i) {
// Copy the Stats sub-object of the bucket into info's Stats base.
389 *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]);
390 info.stack_depth = list[i]->depth;
391 info.call_stack = list[i]->stack;
// NOTE(review): the "callback(info);" invocation and the trailing
// "dealloc_(list);" appear elided in this view — verify upstream.
// Renders a complete heap profile (header, per-bucket records in
// decreasing in-use order, then the /proc/self/maps section) into buf of
// capacity "size".  Returns the number of bytes written, or 0 if even the
// headers do not fit.
397 int HeapProfileTable::FillOrderedProfile(char buf[], int size) const {
398 Bucket** list = MakeSortedBucketList();
400 // Our file format is "bucket, bucket, ..., bucket, proc_self_maps_info".
401 // In the cases buf is too small, we'd rather leave out the last
402 // buckets than leave out the /proc/self/maps info. To ensure that,
403 // we actually print the /proc/self/maps info first, then move it to
404 // the end of the buffer, then write the bucket info into whatever
405 // is remaining, and then move the maps info one last time to close
407 int map_length = snprintf(buf, size, "%s", kProcSelfMapsHeader);
408 if (map_length < 0 || map_length >= size) return 0;
409 bool dummy; // "wrote_all" -- did /proc/self/maps fit in its entirety?
410 map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy);
411 RAW_DCHECK(map_length <= size, "");
412 char* const map_start = buf + size - map_length; // move to end
413 memmove(map_start, buf, map_length);
// NOTE(review): "size -= map_length;" and the "Stats stats;" declaration
// appear elided around here in this view.
417 memset(&stats, 0, sizeof(stats));
418 int bucket_length = snprintf(buf, size, "%s", kProfileHeader);
419 if (bucket_length < 0 || bucket_length >= size) return 0;
// Grand-total line carries the " heapprofile" tag expected by pprof.
420 bucket_length = UnparseBucket(total_, buf, bucket_length, size,
421 " heapprofile", &stats);
423 // Dump the mmap list first.
// NOTE(review): the "if (profile_mmap_)" guard around this mmap section
// appears elided in this view.
425 BufferArgs buffer(buf, bucket_length, size);
426 MemoryRegionMap::IterateBuckets<BufferArgs*>(DumpBucketIterator, &buffer);
427 bucket_length = buffer.buflen;
430 for (int i = 0; i < num_buckets_; i++) {
431 bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "",
434 RAW_DCHECK(bucket_length < size, "");
// NOTE(review): "dealloc_(list);" appears elided — verify the sorted list
// is released before returning.
438 RAW_DCHECK(buf + bucket_length <= map_start, "");
439 memmove(buf + bucket_length, map_start, map_length); // close the gap
441 return bucket_length + map_length;
// Callback for MemoryRegionMap::IterateBuckets inside FillOrderedProfile:
// appends one mmap bucket's record into the shared BufferArgs buffer,
// advancing args->buflen.
445 void HeapProfileTable::DumpBucketIterator(const Bucket* bucket,
447 args->buflen = UnparseBucket(*bucket, args->buf, args->buflen, args->bufsize,
451 #if defined(TYPE_PROFILING)
// Callback for DumpTypeStatistics: folds one allocation into the per-type
// tally map, keyed by the mangled type name (NULL key for untyped blocks).
453 void HeapProfileTable::TallyTypesItererator(
456 AddressMap<TypeCount>* type_size_map) {
457 const std::type_info* type = LookupType(ptr);
459 const void* key = NULL;
// NOTE(review): the "if (type) key = type->name();" style assignment
// appears elided between these lines — verify upstream.
463 TypeCount* count = type_size_map->FindMutable(key);
// Existing entry: accumulate; otherwise insert a fresh (bytes, 1) record.
465 count->bytes += value->bytes;
468 type_size_map->Insert(key, TypeCount(value->bytes, 1));
// Callback for DumpTypeStatistics: writes one "objects: bytes @ typename"
// line to args.fd.  Here "ptr" is the tally-map key, i.e. the mangled
// type name (or NULL for blocks with no typeinfo).
473 void HeapProfileTable::DumpTypesIterator(const void* ptr,
475 const DumpArgs& args) {
// NOTE(review): the local "char buf[...]" and "int len" declarations
// appear elided in this view.
478 const char* mangled_type_name = static_cast<const char*>(ptr);
479 len = snprintf(buf, sizeof(buf), "%6d: %8" PRId64 " @ %s\n",
480 count->objects, count->bytes,
481 mangled_type_name ? mangled_type_name : "(no_typeinfo)");
482 RawWrite(args.fd, buf, len);
484 #endif // defined(TYPE_PROFILING)
// Callback from WriteProfile: emits one single-object bucket record for a
// non-live, non-ignored allocation to args.fd, accumulating into
// args.profile_stats.  A synthetic one-entry Bucket is built on the stack
// reusing the allocation's recorded call stack.
487 void HeapProfileTable::DumpNonLiveIterator(const void* ptr, AllocValue* v,
488 const DumpArgs& args) {
// NOTE(review): the live()/ignore() early-return guards, the "Bucket b;"
// and "char buf[...]" declarations, and "b.allocs = 1;" appear elided in
// this view — verify upstream.
497 memset(&b, 0, sizeof(b));
499 b.alloc_size = v->bytes;
500 b.depth = v->bucket()->depth;
501 b.stack = v->bucket()->stack;
503 int len = UnparseBucket(b, buf, 0, sizeof(buf), "", args.profile_stats);
504 RawWrite(args.fd, buf, len);
// Callback from DumpMarkedObjects: for allocations carrying args.mark,
// writes a one-object bucket record tagged with the object's address.
508 void HeapProfileTable::DumpMarkedIterator(const void* ptr, AllocValue* v,
509 const DumpMarkedArgs& args) {
510 if (v->mark() != args.mark)
// NOTE(review): the "return;", the "Bucket b;" / "char addr[16];" /
// "char buf[...]" declarations, and "b.allocs = 1;" appear elided here.
513 memset(&b, 0, sizeof(b));
515 b.alloc_size = v->bytes;
516 b.depth = v->bucket()->depth;
517 b.stack = v->bucket()->stack;
// NOTE(review): PRIxPTR expects a uintptr_t argument but "ptr" is passed
// as const void* without a reinterpret_cast — format/argument mismatch
// (undefined behavior per the printf contract); verify against upstream,
// which casts here.
519 snprintf(addr, 16, "0x%08" PRIxPTR, ptr);
521 int len = UnparseBucket(b, buf, 0, sizeof(buf), addr, NULL);
522 RawWrite(args.fd, buf, len);
// Trampoline for IterateAllocationAddresses: forwards each tracked
// address to the user callback with its opaque data cookie.
526 void HeapProfileTable::AllocationAddressesIterator(
529 const AllocationAddressIteratorArgs& args) {
530 args.callback(args.data, ptr);
// Per-allocation marking callback: when args.mark_all is false, only
// entries still UNMARKED receive args.mark; when true, every entry is
// overwritten unconditionally.
534 void HeapProfileTable::MarkIterator(const void* ptr, AllocValue* v,
535 const MarkArgs& args) {
536 if (!args.mark_all && v->mark() != UNMARKED)
538 v->set_mark(args.mark);
541 // Callback from NonLiveSnapshot; adds the entry to arg->dest
542 // if the entry is not live and is not present in arg->base.
543 void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v,
544 AddNonLiveArgs* arg) {
// NOTE(review): the live() check / set_live(false) reset branch appears
// elided before this base-snapshot filter — verify upstream.
548 if (arg->base != NULL && arg->base->map_.Find(ptr) != NULL) {
549 // Present in arg->base, so do not save
551 arg->dest->Add(ptr, *v);
// Writes a pprof-compatible heap profile of "allocations" to file_name:
// profile header, the "total" summary line, one record per non-live
// allocation, then the MAPPED_LIBRARIES section.  Returns whether the
// file could be opened/written.
556 bool HeapProfileTable::WriteProfile(const char* file_name,
558 AllocationMap* allocations) {
559 RAW_VLOG(1, "Dumping non-live heap profile to %s", file_name);
560 RawFD fd = RawOpenForWriting(file_name);
561 if (fd == kIllegalRawFD) {
562 RAW_LOG(ERROR, "Failed dumping filtered heap profile to %s", file_name);
// NOTE(review): the "return false;" for the error path, the local buf
// declaration, the RawClose(fd) and final "return true;" appear elided
// in this view.
565 RawWrite(fd, kProfileHeader, strlen(kProfileHeader));
567 int len = UnparseBucket(total, buf, 0, sizeof(buf), " heapprofile",
569 RawWrite(fd, buf, len);
570 const DumpArgs args(fd, NULL);
571 allocations->Iterate<const DumpArgs&>(DumpNonLiveIterator, args);
572 RawWrite(fd, kProcSelfMapsHeader, strlen(kProcSelfMapsHeader));
573 DumpProcSelfMaps(fd);
// Deletes leftover profile dumps ("<prefix>.<pid>..*.heap") from earlier
// runs, using glob() where available.  No-op when the
// cleanup_old_heap_profiles flag is off.
578 void HeapProfileTable::CleanupOldProfiles(const char* prefix) {
579 if (!FLAGS_cleanup_old_heap_profiles)
// NOTE(review): the "return;" and the "char buf[1000];" declaration
// appear elided in this view.
582 snprintf(buf, 1000,"%s.%05d.", prefix, getpid());
583 string pattern = string(buf) + ".*" + kFileExt;
585 #if defined(HAVE_GLOB_H)
// NOTE(review): the "glob_t g;" declaration appears elided here.
587 const int r = glob(pattern.c_str(), GLOB_ERR, NULL, &g);
588 if (r == 0 || r == GLOB_NOMATCH) {
589 const int prefix_length = strlen(prefix);
// NOTE(review): "int i" compared against g.gl_pathc (size_t) mixes
// signed/unsigned; harmless in practice but worth cleaning up.
590 for (int i = 0; i < g.gl_pathc; i++) {
591 const char* fname = g.gl_pathv[i];
592 if ((strlen(fname) >= prefix_length) &&
593 (memcmp(fname, prefix, prefix_length) == 0)) {
594 RAW_VLOG(1, "Removing old heap profile %s", fname);
// NOTE(review): the unlink(fname) call and globfree(&g) appear elided.
600 #else /* HAVE_GLOB_H */
601 RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())");
// Builds a Snapshot (allocated via alloc_ with placement new) containing a
// copy of every currently tracked allocation.  Release with
// ReleaseSnapshot().
605 HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() {
606 Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
607 address_map_->Iterate(AddToSnapshot, s);
// Destroys a Snapshot produced by TakeSnapshot()/NonLiveSnapshot().
// NOTE(review): the body (explicit ~Snapshot() call plus dealloc_) appears
// elided in this view — verify upstream.
611 void HeapProfileTable::ReleaseSnapshot(Snapshot* s) {
616 // Callback from TakeSnapshot; adds a single entry to snapshot
617 void HeapProfileTable::AddToSnapshot(const void* ptr, AllocValue* v,
618 Snapshot* snapshot) {
619 snapshot->Add(ptr, *v);
// Builds a Snapshot of allocations that are NOT live and not present in
// "base" (via AddIfNonLive), logging in-use object/byte counts before and
// after for debugging.
622 HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot(
624 RAW_VLOG(2, "NonLiveSnapshot input: %d %d\n",
625 int(total_.allocs - total_.frees),
626 int(total_.alloc_size - total_.free_size));
628 Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
// NOTE(review): the AddNonLiveArgs construction (args.dest = s;
// args.base = base;) and the trailing "return s;" appear elided here.
632 address_map_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args);
633 RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n",
634 int(s->total_.allocs - s->total_.frees),
635 int(s->total_.alloc_size - s->total_.free_size));
639 // Information kept per unique bucket seen
// Aggregates the leaked objects of one Bucket for the leak report.
// NOTE(review): the data members (bucket pointer, object count, byte
// count — referenced below as count/bytes) appear elided in this view.
640 struct HeapProfileTable::Snapshot::Entry {
644 Entry() : count(0), bytes(0) { }
646 // Order by decreasing bytes
647 bool operator<(const Entry& x) const {
648 return this->bytes > x.bytes;
652 // State used to generate leak report. We keep a mapping from Bucket pointer
653 // to the collected stats for that bucket.
654 struct HeapProfileTable::Snapshot::ReportState {
655 map<Bucket*, Entry> buckets_;
658 // Callback from ReportLeaks; updates ReportState.
// Folds one leaked allocation into the per-bucket Entry, creating the
// Entry on first sight of its bucket.
659 void HeapProfileTable::Snapshot::ReportCallback(const void* ptr,
661 ReportState* state) {
662 Entry* e = &state->buckets_[v->bucket()]; // Creates empty Entry first time
663 e->bucket = v->bucket();
// NOTE(review): the "e->count++;" increment appears elided here.
665 e->bytes += v->bytes;
// Emits the leak report for this snapshot: logs the grand totals, groups
// leaked objects by allocation bucket, sorts buckets by decreasing leaked
// bytes, prints up to FLAGS_heap_check_max_leaks of them (optionally with
// symbolized stacks), and finally dumps a pprof profile to "filename".
668 void HeapProfileTable::Snapshot::ReportLeaks(const char* checker_name,
669 const char* filename,
670 bool should_symbolize) {
671 // This is only used by the heap leak checker, but is intimately
672 // tied to the allocation map that belongs in this module and is
673 // therefore placed here.
671 RAW_LOG(ERROR, "Leak check %s detected leaks of %" PRIuS " bytes "
675 "in %" PRIuS " objects",
677 size_t(total_.alloc_size),
678 size_t(total_.allocs));
680 // Group objects by Bucket
// NOTE(review): the "ReportState state;" declaration appears elided here.
682 map_.Iterate(&ReportCallback, &state);
684 // Sort buckets by decreasing leaked size
685 const int n = state.buckets_.size();
686 Entry* entries = new Entry[n];
// NOTE(review): the "int dst = 0;" counter and the "++iter" loop-advance
// appear elided in this copy loop.
688 for (map<Bucket*,Entry>::const_iterator iter = state.buckets_.begin();
689 iter != state.buckets_.end();
691 entries[dst++] = iter->second;
// Entry::operator< orders by decreasing bytes, so this sorts biggest first.
693 sort(entries, entries + n);
695 // Report a bounded number of leaks to keep the leak report from
697 const int to_report =
698 (FLAGS_heap_check_max_leaks > 0 &&
699 n > FLAGS_heap_check_max_leaks) ? FLAGS_heap_check_max_leaks : n;
700 RAW_LOG(ERROR, "The %d largest leaks:", to_report);
// First pass: register every PC we will print with the symbolizer.
703 SymbolTable symbolization_table;
704 for (int i = 0; i < to_report; i++) {
705 const Entry& e = entries[i];
706 for (int j = 0; j < e.bucket->depth; j++) {
707 symbolization_table.Add(e.bucket->stack[j]);
710 static const int kBufSize = 2<<10;
711 char buffer[kBufSize];
712 if (should_symbolize)
713 symbolization_table.Symbolize();
// Second pass: render one report block per leak into buffer and log it.
714 for (int i = 0; i < to_report; i++) {
715 const Entry& e = entries[i];
716 base::RawPrinter printer(buffer, kBufSize);
717 printer.Printf("Leak of %d bytes in %d objects allocated from:\n",
719 for (int j = 0; j < e.bucket->depth; j++) {
720 const void* pc = e.bucket->stack[j];
721 printer.Printf("\t@ %" PRIxPTR " %s\n",
722 reinterpret_cast<uintptr_t>(pc), symbolization_table.GetSymbol(pc));
724 RAW_LOG(ERROR, "%s", buffer);
// Tell the user how many leaks were elided from the printed report.
728 RAW_LOG(ERROR, "Skipping leaks numbered %d..%d",
// NOTE(review): the "delete[] entries;" cleanup appears elided here.
733 // TODO: Dump the sorted Entry list instead of dumping raw data?
734 // (should be much shorter)
735 if (!HeapProfileTable::WriteProfile(filename, total_, &map_)) {
736 RAW_LOG(ERROR, "Could not write pprof profile to %s", filename);
// Callback from ReportIndividualObjects: logs one leaked object's size
// and address.
740 void HeapProfileTable::Snapshot::ReportObject(const void* ptr,
743 // Perhaps also log the allocation stack trace (unsymbolized)
744 // on this line in case somebody finds it useful.
745 RAW_LOG(ERROR, "leaked %" PRIuS " byte object %p", v->bytes, ptr);
// Logs every object in this snapshot, one line each, via ReportObject.
748 void HeapProfileTable::Snapshot::ReportIndividualObjects() {
// NOTE(review): the declaration of the ignored "unused" cookie (e.g.
// "char unused;") appears elided in this view.
750 map_.Iterate(ReportObject, &unused);