//=-- lsan_common.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//
#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

#if CAN_SANITIZE_LEAKS
# if SANITIZER_APPLE
// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L127
#  if SANITIZER_IOS && !SANITIZER_IOSSIM
#   define OBJC_DATA_MASK 0x0000007ffffffff8UL
#  else
#   define OBJC_DATA_MASK 0x00007ffffffffff8UL
#  endif
// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L139
#  define OBJC_FAST_IS_RW 0x8000000000000000UL
# endif
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
Mutex global_mutex;

Flags lsan_flags;

void DisableCounterUnderflow() {
  if (common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
}
void Flags::SetDefaults() {
# define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
# include "lsan_flags.inc"
# undef LSAN_FLAG
}
void RegisterLsanFlags(FlagParser *parser, Flags *f) {
# define LSAN_FLAG(Type, Name, DefaultValue, Description) \
    RegisterFlag(parser, #Name, Description, &f->Name);
# include "lsan_flags.inc"
# undef LSAN_FLAG
}
# define LOG_POINTERS(...)      \
  do {                          \
    if (flags()->log_pointers)  \
      Report(__VA_ARGS__);      \
  } while (0)

# define LOG_THREADS(...)      \
  do {                         \
    if (flags()->log_threads)  \
      Report(__VA_ARGS__);     \
  } while (0)
class LeakSuppressionContext {
  bool parsed = false;
  SuppressionContext context;
  bool suppressed_stacks_sorted = true;
  InternalMmapVector<u32> suppressed_stacks;
  const LoadedModule *suppress_module = nullptr;

  void LazyInit();
  Suppression *GetSuppressionForAddr(uptr addr);
  bool SuppressInvalid(const StackTrace &stack);
  bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);

 public:
  LeakSuppressionContext(const char *suppression_types[],
                         int suppression_types_num)
      : context(suppression_types, suppression_types_num) {}

  bool Suppress(u32 stack_trace_id, uptr hit_count, uptr total_size);

  const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
    if (!suppressed_stacks_sorted) {
      suppressed_stacks_sorted = true;
      SortAndDedup(suppressed_stacks);
    }
    return suppressed_stacks;
  }
  void PrintMatchedSuppressions();
};
ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
static LeakSuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = {kSuppressionLeak};
static const char kStdSuppressions[] =
# if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // definition.
    "leak:*pthread_exit*\n"
# endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
# if SANITIZER_APPLE
    // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
    "leak:*_os_trace*\n"
# endif
    // TLS leak in some glibc versions, described in
    // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
    "leak:*tls_get_addr*\n";
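// User-supplied suppressions use the same "leak:<pattern>" syntax, whether they
// come from a file passed via LSAN_OPTIONS=suppressions=<path> or from the
// __lsan_default_suppressions() hook. A hypothetical entry, for illustration:
//   leak:MyNoisyInitFunction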
void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder)
      LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
}
void LeakSuppressionContext::LazyInit() {
  if (!parsed) {
    parsed = true;
    context.ParseFromFile(flags()->suppressions);
    if (&__lsan_default_suppressions)
      context.Parse(__lsan_default_suppressions());
    context.Parse(kStdSuppressions);
    if (flags()->use_tls && flags()->use_ld_allocations)
      suppress_module = GetLinker();
  }
}
Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  const char *module_name = Symbolizer::GetOrInit()->GetModuleNameForPc(addr);
  if (!module_name)
    module_name = "<unknown module>";
  if (context.Match(module_name, kSuppressionLeak, &s))
    return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
        context.Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}
static uptr GetCallerPC(const StackTrace &stack) {
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}
# if SANITIZER_APPLE
// Objective-C class data pointers are stored with flags in the low bits, so
// they need to be transformed back into something that looks like a pointer.
static inline void *MaybeTransformPointer(void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  if ((ptr & OBJC_FAST_IS_RW) == OBJC_FAST_IS_RW)
    ptr &= OBJC_DATA_MASK;
  return reinterpret_cast<void *>(ptr);
}
# endif
// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
// modules accounting etc.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by intercepting
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
// addresses are stored in a dynamically allocated array (the DTV) which is
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
// being reachable from the static TLS, and the dynamic TLS being reachable from
// the DTV. This is because the initial DTV is allocated before our interception
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
// can't special-case it either, since we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
// On all other platforms, this simply checks to ensure that the caller pc is
// valid before reporting chunks as leaked.
bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
  uptr caller_pc = GetCallerPC(stack);
  // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
  // it as reachable, as we can't properly report its allocation stack anyway.
  return !caller_pc ||
         (suppress_module && suppress_module->containsAddress(caller_pc));
}
bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
                                            uptr hit_count, uptr total_size) {
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) {
      s->weight += total_size;
      atomic_fetch_add(&s->hit_count, hit_count, memory_order_relaxed);
      return true;
    }
  }
  return false;
}
bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
                                      uptr total_size) {
  LazyInit();
  StackTrace stack = StackDepotGet(stack_trace_id);
  if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
    return false;
  suppressed_stacks_sorted = false;
  suppressed_stacks.push_back(stack_trace_id);
  return true;
}
static LeakSuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

static InternalMmapVectorNoCtor<RootRegion> root_regions;

InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions() {
  return &root_regions;
}
void InitCommonLsan() {
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}
class Decorator : public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() {}
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
};
static inline bool MaybeUserPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress)
    return false;
# if defined(__x86_64__)
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
# elif defined(__mips64)
  return ((p >> 40) == 0);
# elif defined(__aarch64__)
  // Accept up to 48 bit VMA.
  return ((p >> 48) == 0);
# else
  return true;
# endif
}
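// For example, on x86-64 the canonical-address check above rejects a
// kernel-space value such as 0xffff800000000000 (bits 47 and above are set)
// while accepting a typical mmap-range address like 0x00007f0000001000.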
// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
               (void *)end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void **>(pp);
# if SANITIZER_APPLE
    p = MaybeTransformPointer(p);
# endif
    if (!MaybeUserPointer(reinterpret_cast<uptr>(p)))
      continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk)
      continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin)
      continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored)
      continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          (void *)pp, p, (void *)chunk, (void *)(chunk + m.requested_size()),
          m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n",
                 (void *)pp, p, (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}
// Scans a global range for pointers
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
  uptr allocator_begin = 0, allocator_end = 0;
  GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
  if (begin <= allocator_begin && allocator_begin < end) {
    CHECK_LE(allocator_begin, allocator_end);
    CHECK_LE(allocator_end, end);
    if (begin < allocator_begin)
      ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                           kReachable);
    if (allocator_end < end)
      ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
  } else {
    ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
  }
}

void ForEachExtraStackRangeCb(uptr begin, uptr end, void *arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}
# if SANITIZER_FUCHSIA

// Fuchsia handles all threads together with its own callback.
static void ProcessThreads(SuspendedThreadsList const &, Frontier *, tid_t,
                           uptr) {}

# else

#  if SANITIZER_ANDROID
// FIXME: Move this out into *libcdep.cpp
extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
    pid_t, void (*cb)(void *, void *, uptr, void *), void *);
#  endif
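// Adds to the root set any heap chunks pointed to by the additional pointers
// that each ThreadContext exposes via GetAdditionalThreadContextPtrs.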
static void ProcessThreadRegistry(Frontier *frontier) {
  InternalMmapVector<uptr> ptrs;
  GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      GetAdditionalThreadContextPtrs, &ptrs);

  for (uptr i = 0; i < ptrs.size(); ++i) {
    void *ptr = reinterpret_cast<void *>(ptrs[i]);
    uptr chunk = PointsIntoChunk(ptr);
    if (!chunk)
      continue;
    LsanMetadata m(chunk);
    if (!m.allocated())
      continue;

    // Mark as reachable and add to frontier.
    LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
    m.set_tag(kReachable);
    frontier->push_back(chunk);
  }
}
// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier, tid_t caller_tid,
                           uptr caller_sp) {
  InternalMmapVector<uptr> registers;
  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %llu.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
    bool thread_found =
        GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
                              &tls_end, &cache_begin, &cache_end, &dtls);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %llu not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    PtraceRegistersStatus have_registers =
        suspended_threads.GetRegistersAndSP(i, &registers, &sp);
    if (have_registers != REGISTERS_AVAILABLE) {
      Report("Unable to get registers from thread %llu.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable unless
      // GetRegistersAndSP failed with ESRCH.
      if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
        continue;
      sp = stack_begin;
    }
    if (suspended_threads.GetThreadID(i) == caller_tid)
      sp = caller_sp;

    if (flags()->use_registers && have_registers) {
      uptr registers_begin = reinterpret_cast<uptr>(registers.data());
      uptr registers_end =
          reinterpret_cast<uptr>(registers.data() + registers.size());
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);
    }

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
                  (void *)stack_end, (void *)sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack, or swapcontext was used).
        // Again, consider the entire stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
                    skipped, (void *)stack_begin, (void *)stack_end);
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    // TODO: support static TLS for ARM and x86.
#if !defined(__arm__) && !defined(__i386__)
    if (flags()->use_tls) {
      if (tls_begin) {
        LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
        // If the tls and cache ranges don't overlap, scan full tls range,
        // otherwise, only scan the non-overlapping portions
        if (cache_begin == cache_end || tls_end < cache_begin ||
            tls_begin > cache_end) {
          ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
        } else {
          if (tls_begin < cache_begin)
            ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                                 kReachable);
          if (tls_end > cache_end)
            ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
                                 kReachable);
        }
      }
# if SANITIZER_ANDROID
      auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_idd*/,
                     void *arg) -> void {
        ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
                             reinterpret_cast<uptr>(dtls_end),
                             reinterpret_cast<Frontier *>(arg), "DTLS",
                             kReachable);
      };

      // FIXME: There might be a race-condition here (and in Bionic) if the
      // thread is suspended in the middle of updating its DTLS. IOWs, we
      // could scan already freed memory. (probably fine for now)
      __libc_iterate_dynamic_tls(os_id, cb, frontier);
# else
      if (dtls && !DTLSInDestruction(dtls)) {
        ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
          uptr dtls_beg = dtv.beg;
          uptr dtls_end = dtls_beg + dtv.size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
                        (void *)dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
          }
        });
      } else {
        // We are handling a thread with DTLS under destruction. Log about
        // this and continue.
        LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
      }
# endif
    }
#endif  // !defined(__arm__) && !defined(__i386__)
  }

  // Add pointers reachable from ThreadContexts
  ProcessThreadRegistry(frontier);
}
# endif  // SANITIZER_FUCHSIA
void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
                    uptr region_begin, uptr region_end, bool is_readable) {
  uptr intersection_begin = Max(root_region.begin, region_begin);
  uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
  if (intersection_begin >= intersection_end)
    return;
  LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
               (void *)root_region.begin,
               (void *)(root_region.begin + root_region.size),
               (void *)region_begin, (void *)region_end,
               is_readable ? "readable" : "unreadable");
  if (is_readable)
    ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
                         kReachable);
}
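// Intersects one registered root region with every segment of the current
// address-space mapping and scans the readable intersections.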
static void ProcessRootRegion(Frontier *frontier,
                              const RootRegion &root_region) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    ScanRootRegion(frontier, root_region, segment.start, segment.end,
                   segment.IsReadable());
  }
}
// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions)
    return;
  for (uptr i = 0; i < root_regions.size(); i++)
    ProcessRootRegion(frontier, root_regions[i]);
}
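// Breadth-first flood fill: repeatedly pops a chunk off |frontier| and scans
// its payload; ScanRangeForPointers pushes any newly discovered chunks back
// onto the frontier until it is exhausted.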
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}
// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  // On 32-bit archs it's hard to distinguish between direct and indirect leaks.
#if SANITIZER_WORDSIZE == 32
  return;
#endif
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}
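// ForEachChunk callback. Marks chunks whose allocation stack id matches one of
// the previously suppressed stacks as kIgnored, so the reachability analysis
// skips them.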
static void IgnoredSuppressedCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated() || m.tag() == kIgnored)
    return;

  const InternalMmapVector<u32> &suppressed =
      *static_cast<const InternalMmapVector<u32> *>(arg);
  uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
  if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
    return;

  LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", (void *)chunk,
               (void *)(chunk + m.requested_size()), m.requested_size());
  m.set_tag(kIgnored);
}
// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// the frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}
// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
                              Frontier *frontier, tid_t caller_tid,
                              uptr caller_sp) {
  const InternalMmapVector<u32> &suppressed_stacks =
      GetSuppressionContext()->GetSortedSuppressedStacks();
  if (!suppressed_stacks.empty()) {
    ForEachChunk(IgnoredSuppressedCb,
                 const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
  }
  ForEachChunk(CollectIgnoredCb, frontier);
  ProcessGlobalRegions(frontier);
  ProcessThreads(suspended_threads, frontier, caller_tid, caller_sp);
  ProcessRootRegions(frontier);
  FloodFillTag(frontier, kReachable);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(frontier);
  FloodFillTag(frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}
// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}
// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated())
    return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked)
    leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
}
void LeakSuppressionContext::PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched;
  context.GetMatched(&matched);
  if (matched.empty())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++) {
    Printf("%7zu %10zu %s\n",
           static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
           matched[i]->weight, matched[i]->templ);
  }
  Printf("%s\n\n", line);
}
static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
  const InternalMmapVector<tid_t> &suspended_threads =
      *(const InternalMmapVector<tid_t> *)arg;
  if (tctx->status == ThreadStatusRunning) {
    uptr i = InternalLowerBound(suspended_threads, tctx->os_id);
    if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
      Report(
          "Running thread %llu was not suspended. False leaks are possible.\n",
          tctx->os_id);
  }
}
# if SANITIZER_FUCHSIA

// Fuchsia provides a libc interface that guarantees all threads are
// covered, and SuspendedThreadList is never really used.
static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}

# else  // !SANITIZER_FUCHSIA

static void ReportUnsuspendedThreads(
    const SuspendedThreadsList &suspended_threads) {
  InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
  for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
    threads[i] = suspended_threads.GetThreadID(i);

  Sort(threads.data(), threads.size());

  GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      &ReportIfNotSuspended, &threads);
}

# endif  // !SANITIZER_FUCHSIA
static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ReportUnsuspendedThreads(suspended_threads);
  ClassifyAllChunks(suspended_threads, &param->frontier, param->caller_tid,
                    param->caller_sp);
  ForEachChunk(CollectLeaksCb, &param->leaks);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}
static bool PrintResults(LeakReport &report) {
  uptr unsuppressed_count = report.UnsuppressedLeakCount();
  if (unsuppressed_count) {
    Decorator d;
    Printf(
        "\n"
        "================================================================="
        "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.Default());
    report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    GetSuppressionContext()->PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    report.PrintSummary();
    if (common_flags()->print_cmdline)
      PrintCmdline();
  }
  return unsuppressed_count;
}
static bool lsan_check_in_progress = false;

static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return false;
  lsan_check_in_progress = true;
  // Inside LockStuffAndStopTheWorld we can't run symbolizer, so we can't match
  // suppressions. However if a stack id was previously suppressed, it should be
  // suppressed in future checks as well.
  for (int i = 0;; ++i) {
    EnsureMainThreadIDIsCorrect();
    CheckForLeaksParam param;
    // Capture calling thread's stack pointer early, to avoid false negatives.
    // Old frame with dead pointers might be overlapped by new frame inside
    // CheckForLeaks which does not use bytes with pointers before the
    // threads are suspended and stack pointers captured.
    param.caller_tid = GetTid();
    param.caller_sp = reinterpret_cast<uptr>(__builtin_frame_address(0));
    LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
    if (!param.success) {
      Report("LeakSanitizer has encountered a fatal error.\n");
      Report(
          "HINT: For debugging, try setting environment variable "
          "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
      Report(
          "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
          "etc)\n");
      Die();
    }
    LeakReport leak_report;
    leak_report.AddLeakedChunks(param.leaks);
    lsan_check_in_progress = false;

    // No new suppressions stacks, so rerun will not help and we can report.
    if (!leak_report.ApplySuppressions())
      return PrintResults(leak_report);

    // No indirect leaks to report, so we are done here.
    if (!leak_report.IndirectUnsuppressedLeakCount())
      return PrintResults(leak_report);

    if (i >= 8) {
      Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
      return PrintResults(leak_report);
    }

    // We found a new previously unseen suppressed call stack. Rerun to make
    // sure it does not hold indirect leaks.
    VReport(1, "Rerun with %zu suppressed stacks.",
            GetSuppressionContext()->GetSortedSuppressedStacks().size());
  }
}
static bool has_reported_leaks = false;
bool HasReportedLeaks() { return has_reported_leaks; }

void DoLeakCheck() {
  Lock l(&global_mutex);
  static bool already_done;
  if (already_done)
    return;
  already_done = true;
  has_reported_leaks = CheckForLeaks();
  if (has_reported_leaks)
    HandleLeaks();
}
static int DoRecoverableLeakCheck() {
  Lock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}

void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by moving logic into DedupLeaks.
const uptr kMaxLeaksConsidered = 5000;
void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
  for (const LeakedChunk &leak : chunks) {
    uptr chunk = leak.chunk;
    u32 stack_trace_id = leak.stack_trace_id;
    uptr leaked_size = leak.leaked_size;
    ChunkTag tag = leak.tag;
    CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);

    if (u32 resolution = flags()->resolution) {
      StackTrace stack = StackDepotGet(stack_trace_id);
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    }

    bool is_directly_leaked = (tag == kDirectlyLeaked);
    uptr i;
    for (i = 0; i < leaks_.size(); i++) {
      if (leaks_[i].stack_trace_id == stack_trace_id &&
          leaks_[i].is_directly_leaked == is_directly_leaked) {
        leaks_[i].hit_count++;
        leaks_[i].total_size += leaked_size;
        break;
      }
    }
    if (i == leaks_.size()) {
      if (leaks_.size() == kMaxLeaksConsidered)
        return;
      Leak leak = {next_id_++, /* hit_count */ 1,
                   leaked_size, stack_trace_id,
                   is_directly_leaked, /* is_suppressed */ false};
      leaks_.push_back(leak);
    }
    if (flags()->report_objects) {
      LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
      leaked_objects_.push_back(obj);
    }
  }
}
static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}
void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf(
        "Too many leaks! Only the first %zu leaks encountered will be "
        "reported.\n",
        kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  Sort(leaks_.data(), leaks_.size(), &LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed)
      continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report)
      break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}
void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.Default());

  CHECK(leaks_[index].stack_trace_id);
  StackDepotGet(leaks_[index].stack_trace_id).Print();

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}
void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", (void *)leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}
void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed)
      continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary;
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}
uptr LeakReport::ApplySuppressions() {
  LeakSuppressionContext *suppressions = GetSuppressionContext();
  uptr new_suppressions = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,
                               leaks_[i].total_size)) {
      leaks_[i].is_suppressed = true;
      ++new_suppressions;
    }
  }
  return new_suppressions;
}
uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed)
      result++;
  return result;
}

uptr LeakReport::IndirectUnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked)
      result++;
  return result;
}
}  // namespace __lsan

#else  // CAN_SANITIZE_LEAKS

namespace __lsan {
void InitCommonLsan() {}
void DoLeakCheck() {}
void DoRecoverableLeakCheckVoid() {}
void DisableInThisThread() {}
void EnableInThisThread() {}
}  // namespace __lsan

#endif  // CAN_SANITIZE_LEAKS
using namespace __lsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  Lock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1,
            "__lsan_ignore_object(): "
            "heap object at %p is already being ignored\n",
            p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}
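// Example (hypothetical user code): exclude a single allocation from all
// future leak reports:
//   void *p = malloc(32);
//   __lsan_ignore_object(p);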
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  RootRegion region = {reinterpret_cast<uptr>(begin), size};
  root_regions.push_back(region);
  VReport(1, "Registered root region at %p of size %zu\n", begin, size);
#endif  // CAN_SANITIZE_LEAKS
}
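// Example (hypothetical user code): treat a custom arena as part of the root
// set, so pointers stored inside it keep their targets from being reported:
//   static char arena[1 << 20];
//   __lsan_register_root_region(arena, sizeof(arena));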
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  bool removed = false;
  for (uptr i = 0; i < root_regions.size(); i++) {
    RootRegion region = root_regions[i];
    if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
      removed = true;
      uptr last_index = root_regions.size() - 1;
      root_regions[i] = root_regions[last_index];
      root_regions.pop_back();
      VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %zu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif  // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::DisableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  __lsan::EnableInThisThread();
#endif
}
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
  return 0;
}
SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
  return "";
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_WEAK_DEF(int, __lsan_is_turned_off, void) {
  return 0;
}
SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_suppressions, void) {