Main goal is to remove thread registry dependency from the interface because HWASAN is using its own code to manage threads.
Reviewed By: vitalybuka, kstoimenov
Differential Revision: https://reviews.llvm.org/D140039
return kIgnoreObjectSuccess;
}
-void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs) {
- // Look for the arg pointer of threads that have been created or are running.
- // This is necessary to prevent false positive leaks due to the AsanThread
- // holding the only live reference to a heap object. This can happen because
- // the `pthread_create()` interceptor doesn't wait for the child thread to
- // start before returning and thus loosing the the only live reference to the
- // heap object on the stack.
-
- __asan::AsanThreadContext *atctx =
- reinterpret_cast<__asan::AsanThreadContext *>(tctx);
- __asan::AsanThread *asan_thread = atctx->thread;
-
- // Note ThreadStatusRunning is required because there is a small window where
- // the thread status switches to `ThreadStatusRunning` but the `arg` pointer
- // still isn't on the stack yet.
- if (atctx->status != ThreadStatusCreated &&
- atctx->status != ThreadStatusRunning)
- return;
-
- uptr thread_arg = reinterpret_cast<uptr>(asan_thread->get_arg());
- if (!thread_arg)
- return;
-
- auto ptrsVec = reinterpret_cast<InternalMmapVector<uptr> *>(ptrs);
- ptrsVec->push_back(thread_arg);
-}
-
} // namespace __lsan
// ---------------------- Interface ---------------- {{{1
fake_stack->ForEachFakeFrame(callback, arg);
}
-void RunCallbackForEachThreadLocked(__sanitizer::ThreadRegistry::ThreadCallback cb,
- void *arg) {
- GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(cb, arg);
+void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
+ GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+ [](ThreadContextBase *tctx, void *ptrs) {
+ // Look for the arg pointer of threads that have been created or are
+ // running. This is necessary to prevent false positive leaks due to the
+ // AsanThread holding the only live reference to a heap object. This
+ // can happen because the `pthread_create()` interceptor doesn't wait
+      // for the child thread to start before returning and thus losing the
+      // only live reference to the heap object on the stack.
+
+ __asan::AsanThreadContext *atctx =
+ static_cast<__asan::AsanThreadContext *>(tctx);
+
+ // Note ThreadStatusRunning is required because there is a small window
+ // where the thread status switches to `ThreadStatusRunning` but the
+ // `arg` pointer still isn't on the stack yet.
+ if (atctx->status != ThreadStatusCreated &&
+ atctx->status != ThreadStatusRunning)
+ return;
+
+ uptr thread_arg = reinterpret_cast<uptr>(atctx->thread->get_arg());
+ if (!thread_arg)
+ return;
+
+ auto ptrsVec = reinterpret_cast<InternalMmapVector<uptr> *>(ptrs);
+ ptrsVec->push_back(thread_arg);
+ },
+ ptrs);
+}
+
+void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
+ GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+ [](ThreadContextBase *tctx, void *threads) {
+ if (tctx->status == ThreadStatusRunning)
+ reinterpret_cast<InternalMmapVector<tid_t> *>(threads)->push_back(
+ tctx->os_id);
+ },
+ threads);
}
void FinishThreadLocked(u32 tid) {
void *arg) {}
void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {}
-void ReportUnsuspendedThreadsLocked(InternalMmapVector<tid_t> *threads) {}
+void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {}
} // namespace __lsan
}
}
-void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs) {
+void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
// This function can be used to treat memory reachable from `tctx` as live.
// This is useful for threads that have been created but not yet started.
static void ProcessThreadRegistry(Frontier *frontier) {
InternalMmapVector<uptr> ptrs;
- RunCallbackForEachThreadLocked(GetAdditionalThreadContextPtrs, &ptrs);
+ GetAdditionalThreadContextPtrsLocked(&ptrs);
for (uptr i = 0; i < ptrs.size(); ++i) {
void *ptr = reinterpret_cast<void *>(ptrs[i]);
Printf("%s\n\n", line);
}
-static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
- const InternalMmapVector<tid_t> &suspended_threads =
- *(const InternalMmapVector<tid_t> *)arg;
- if (tctx->status == ThreadStatusRunning) {
- uptr i = InternalLowerBound(suspended_threads, tctx->os_id);
- if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
- Report(
- "Running thread %llu was not suspended. False leaks are possible.\n",
- tctx->os_id);
- }
-}
-
# if SANITIZER_FUCHSIA
// Fuchsia provides a libc interface that guarantees all threads are
Sort(threads.data(), threads.size());
- RunCallbackForEachThreadLocked(&ReportIfNotSuspended, &threads);
+ InternalMmapVector<tid_t> unsuspended;
+ GetRunningThreadsLocked(&unsuspended);
+
+ for (auto os_id : unsuspended) {
+ uptr i = InternalLowerBound(threads, os_id);
+ if (i >= threads.size() || threads[i] != os_id)
+ Report(
+ "Running thread %zu was not suspended. False leaks are possible.\n",
+ os_id);
+ }
}
# endif // !SANITIZER_FUCHSIA
void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches);
void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
void *arg);
-
-void RunCallbackForEachThreadLocked(__sanitizer::ThreadRegistry::ThreadCallback cb,
- void *arg);
+void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs);
+void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads);
//// --------------------------------------------------------------------------
//// Allocator prototypes.
// Helper for __lsan_ignore_object().
IgnoreObjectResult IgnoreObjectLocked(const void *p);
-void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs);
-
// The rest of the LSan interface which is implemented by library.
struct ScopedStopTheWorldLock {
//===---------------------------------------------------------------------===//
#include "lsan_common.h"
+#include "lsan_thread.h"
#include "sanitizer_common/sanitizer_platform.h"
#if CAN_SANITIZE_LEAKS && SANITIZER_FUCHSIA
// just for the allocator cache, and to call ForEachExtraStackRange,
// which ASan needs.
if (flags()->use_stacks) {
- RunCallbackForEachThreadLocked(
+ GetLsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
[](ThreadContextBase *tctx, void *arg) {
ForEachExtraStackRange(tctx->os_id, ForEachExtraStackRangeCb,
arg);
}
void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {
- RunCallbackForEachThreadLocked(
+ GetLsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
[](ThreadContextBase *tctx, void *arg) {
auto ctx = static_cast<ThreadContext *>(tctx);
static_cast<decltype(caches)>(arg)->push_back(ctx->cache_begin());
return thread_registry;
}
-void RunCallbackForEachThreadLocked(
- __sanitizer::ThreadRegistry::ThreadCallback cb, void *arg) {
- GetLsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(cb, arg);
+void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
+ GetLsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+ [](ThreadContextBase *tctx, void *threads) {
+ if (tctx->status == ThreadStatusRunning) {
+ reinterpret_cast<InternalMmapVector<tid_t> *>(threads)->push_back(
+ tctx->os_id);
+ }
+ },
+ threads);
}
} // namespace __lsan