[LSAN] More LSAN interface tweaking.
author     Kirill Stoimenov <kstoimenov@google.com>
           Thu, 12 Jan 2023 21:31:49 +0000 (13:31 -0800)
committer  Vitaly Buka <vitalybuka@google.com>
           Fri, 13 Jan 2023 01:58:11 +0000 (17:58 -0800)
The main goal is to remove the thread registry dependency from the interface, because HWASAN uses its own code to manage threads.

Reviewed By: vitalybuka, kstoimenov

Differential Revision: https://reviews.llvm.org/D140039
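
The net effect on the LSan interface (see the lsan_common.h hunk below) is that the generic, ThreadRegistry-coupled callback hook is replaced by two purpose-built hooks that only deal in sanitizer_common containers, so a tool that manages its threads itself can implement them without exposing a ThreadRegistry. Summarized from the declarations changed in lsan_common.h:

    // Before: the tool had to hand LSan a ThreadRegistry-style callback.
    void RunCallbackForEachThreadLocked(
        __sanitizer::ThreadRegistry::ThreadCallback cb, void *arg);

    // After: each tool implements two narrow hooks, however it tracks threads.
    void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs);
    void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads);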

compiler-rt/lib/asan/asan_allocator.cpp
compiler-rt/lib/asan/asan_thread.cpp
compiler-rt/lib/hwasan/hwasan_thread.cpp
compiler-rt/lib/lsan/lsan_allocator.cpp
compiler-rt/lib/lsan/lsan_common.cpp
compiler-rt/lib/lsan/lsan_common.h
compiler-rt/lib/lsan/lsan_common_fuchsia.cpp
compiler-rt/lib/lsan/lsan_fuchsia.cpp
compiler-rt/lib/lsan/lsan_thread.cpp

compiler-rt/lib/asan/asan_allocator.cpp
index 52d7eff..335de33 100644
@@ -1153,33 +1153,6 @@ IgnoreObjectResult IgnoreObjectLocked(const void *p) {
   return kIgnoreObjectSuccess;
 }
 
-void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs) {
-  // Look for the arg pointer of threads that have been created or are running.
-  // This is necessary to prevent false positive leaks due to the AsanThread
-  // holding the only live reference to a heap object.  This can happen because
-  // the `pthread_create()` interceptor doesn't wait for the child thread to
-  // start before returning and thus loosing the the only live reference to the
-  // heap object on the stack.
-
-  __asan::AsanThreadContext *atctx =
-      reinterpret_cast<__asan::AsanThreadContext *>(tctx);
-  __asan::AsanThread *asan_thread = atctx->thread;
-
-  // Note ThreadStatusRunning is required because there is a small window where
-  // the thread status switches to `ThreadStatusRunning` but the `arg` pointer
-  // still isn't on the stack yet.
-  if (atctx->status != ThreadStatusCreated &&
-      atctx->status != ThreadStatusRunning)
-    return;
-
-  uptr thread_arg = reinterpret_cast<uptr>(asan_thread->get_arg());
-  if (!thread_arg)
-    return;
-
-  auto ptrsVec = reinterpret_cast<InternalMmapVector<uptr> *>(ptrs);
-  ptrsVec->push_back(thread_arg);
-}
-
 }  // namespace __lsan
 
 // ---------------------- Interface ---------------- {{{1
compiler-rt/lib/asan/asan_thread.cpp
index 4b5e4a3..116bdc4 100644
@@ -518,9 +518,44 @@ void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
   fake_stack->ForEachFakeFrame(callback, arg);
 }
 
-void RunCallbackForEachThreadLocked(__sanitizer::ThreadRegistry::ThreadCallback cb,
-                                    void *arg) {
-  GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(cb, arg);
+void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
+  GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+      [](ThreadContextBase *tctx, void *ptrs) {
+        // Look for the arg pointer of threads that have been created or are
+        // running. This is necessary to prevent false positive leaks due to the
+        // AsanThread holding the only live reference to a heap object.  This
+        // can happen because the `pthread_create()` interceptor doesn't wait
+        // for the child thread to start before returning and thus losing the
+        // only live reference to the heap object on the stack.
+
+        __asan::AsanThreadContext *atctx =
+            static_cast<__asan::AsanThreadContext *>(tctx);
+
+        // Note ThreadStatusRunning is required because there is a small window
+        // where the thread status switches to `ThreadStatusRunning` but the
+        // `arg` pointer still isn't on the stack yet.
+        if (atctx->status != ThreadStatusCreated &&
+            atctx->status != ThreadStatusRunning)
+          return;
+
+        uptr thread_arg = reinterpret_cast<uptr>(atctx->thread->get_arg());
+        if (!thread_arg)
+          return;
+
+        auto ptrsVec = reinterpret_cast<InternalMmapVector<uptr> *>(ptrs);
+        ptrsVec->push_back(thread_arg);
+      },
+      ptrs);
+}
+
+void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
+  GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+      [](ThreadContextBase *tctx, void *threads) {
+        if (tctx->status == ThreadStatusRunning)
+          reinterpret_cast<InternalMmapVector<tid_t> *>(threads)->push_back(
+              tctx->os_id);
+      },
+      threads);
 }
 
 void FinishThreadLocked(u32 tid) {
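
For context on the comment above, the scenario the arg scan defends against is ordinary user code that hands a heap allocation to pthread_create() and lets the child own it. A hypothetical repro (not part of this patch) that could otherwise produce a false leak report if a leak check races with thread startup:

    // Hypothetical repro: the parent drops its only reference right after
    // pthread_create(), so until the child starts and copies `arg` onto its
    // own stack, the allocation is reachable only through the thread's start
    // argument.
    #include <pthread.h>
    #include <stdlib.h>

    static void *Worker(void *arg) {
      free(arg);  // The child owns and eventually releases the allocation.
      return nullptr;
    }

    int main() {
      void *payload = malloc(64);
      pthread_t t;
      pthread_create(&t, nullptr, Worker, payload);
      payload = nullptr;  // Parent reference gone; only the start arg remains.
      pthread_join(t, nullptr);
      return 0;
    }

GetAdditionalThreadContextPtrsLocked() reports that start argument as an extra root, so the allocation is treated as reachable during the startup window.
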
compiler-rt/lib/hwasan/hwasan_thread.cpp
index 8f490cc..4fc420f 100644
@@ -198,6 +198,6 @@ void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                             void *arg) {}
 
 void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {}
-void ReportUnsuspendedThreadsLocked(InternalMmapVector<tid_t> *threads) {}
+void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {}
 
 }  // namespace __lsan
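
The hwasan file above keeps these hooks as empty stubs: HWASAN tracks threads with its own machinery rather than __sanitizer::ThreadRegistry, and with the registry type gone from the interface it can implement them against that machinery without LSan knowing about it. A minimal sketch of what such an implementation could look like, using a made-up ToolThread list rather than real hwasan types:

    // Sketch only; `ToolThread` and `tool_thread_list_head` are hypothetical
    // stand-ins for whatever per-thread bookkeeping the tool already keeps.
    // The point is that the hooks only fill sanitizer_common vectors.
    #include "sanitizer_common/sanitizer_common.h"  // InternalMmapVector, tid_t, uptr

    using namespace __sanitizer;

    namespace __lsan {

    struct ToolThread {
      tid_t os_id;
      bool running;
      uptr start_arg;  // pending pthread_create() arg, 0 if already consumed
      ToolThread *next;
    };

    extern ToolThread *tool_thread_list_head;  // hypothetical global list

    void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
      for (ToolThread *t = tool_thread_list_head; t; t = t->next)
        if (t->running)
          threads->push_back(t->os_id);
    }

    void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
      for (ToolThread *t = tool_thread_list_head; t; t = t->next)
        if (t->start_arg)
          ptrs->push_back(t->start_arg);
    }

    }  // namespace __lsan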
compiler-rt/lib/lsan/lsan_allocator.cpp
index 43928ad..b18d829 100644
@@ -319,7 +319,7 @@ IgnoreObjectResult IgnoreObjectLocked(const void *p) {
   }
 }
 
-void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs) {
+void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
   // This function can be used to treat memory reachable from `tctx` as live.
   // This is useful for threads that have been created but not yet started.
 
compiler-rt/lib/lsan/lsan_common.cpp
index 33c590c..d032a5d 100644
@@ -371,7 +371,7 @@ extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
 
 static void ProcessThreadRegistry(Frontier *frontier) {
   InternalMmapVector<uptr> ptrs;
-  RunCallbackForEachThreadLocked(GetAdditionalThreadContextPtrs, &ptrs);
+  GetAdditionalThreadContextPtrsLocked(&ptrs);
 
   for (uptr i = 0; i < ptrs.size(); ++i) {
     void *ptr = reinterpret_cast<void *>(ptrs[i]);
@@ -668,18 +668,6 @@ void LeakSuppressionContext::PrintMatchedSuppressions() {
   Printf("%s\n\n", line);
 }
 
-static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
-  const InternalMmapVector<tid_t> &suspended_threads =
-      *(const InternalMmapVector<tid_t> *)arg;
-  if (tctx->status == ThreadStatusRunning) {
-    uptr i = InternalLowerBound(suspended_threads, tctx->os_id);
-    if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
-      Report(
-          "Running thread %llu was not suspended. False leaks are possible.\n",
-          tctx->os_id);
-  }
-}
-
 #  if SANITIZER_FUCHSIA
 
 // Fuchsia provides a libc interface that guarantees all threads are
@@ -696,7 +684,16 @@ static void ReportUnsuspendedThreads(
 
   Sort(threads.data(), threads.size());
 
-  RunCallbackForEachThreadLocked(&ReportIfNotSuspended, &threads);
+  InternalMmapVector<tid_t> unsuspended;
+  GetRunningThreadsLocked(&unsuspended);
+
+  for (auto os_id : unsuspended) {
+    uptr i = InternalLowerBound(threads, os_id);
+    if (i >= threads.size() || threads[i] != os_id)
+      Report(
+          "Running thread %zu was not suspended. False leaks are possible.\n",
+          os_id);
+  }
 }
 
 #  endif  // !SANITIZER_FUCHSIA
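
The rewritten ReportUnsuspendedThreads keeps the original check (every thread reported as running must appear in the sorted list of suspended threads) but obtains the running set through GetRunningThreadsLocked instead of walking the registry with a callback. In isolation the sorted-membership test looks like this (standalone illustration, with std::lower_bound standing in for the sanitizer's InternalLowerBound):

    // Standalone illustration of the membership check; not sanitizer code.
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    using tid_t = uint64_t;

    static void ReportUnsuspended(std::vector<tid_t> suspended,
                                  const std::vector<tid_t> &running) {
      std::sort(suspended.begin(), suspended.end());
      for (tid_t os_id : running) {
        // lower_bound on the sorted list: os_id was suspended iff the search
        // lands on an equal element.
        auto it = std::lower_bound(suspended.begin(), suspended.end(), os_id);
        if (it == suspended.end() || *it != os_id)
          std::printf("Running thread %llu was not suspended.\n",
                      (unsigned long long)os_id);
      }
    }

    int main() {
      // Only thread 13 is running but missing from the suspended set.
      ReportUnsuspended(/*suspended=*/{42, 7, 11}, /*running=*/{7, 13, 42});
      return 0;
    }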
compiler-rt/lib/lsan/lsan_common.h
index 089aa10..b080b51 100644
@@ -105,9 +105,8 @@ bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
 void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches);
 void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                             void *arg);
-
-void RunCallbackForEachThreadLocked(__sanitizer::ThreadRegistry::ThreadCallback cb,
-                                    void *arg);
+void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs);
+void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads);
 
 //// --------------------------------------------------------------------------
 //// Allocator prototypes.
@@ -146,8 +145,6 @@ void ForEachChunk(ForEachChunkCallback callback, void *arg);
 // Helper for __lsan_ignore_object().
 IgnoreObjectResult IgnoreObjectLocked(const void *p);
 
-void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs);
-
 // The rest of the LSan interface which is implemented by library.
 
 struct ScopedStopTheWorldLock {
compiler-rt/lib/lsan/lsan_common_fuchsia.cpp
index ac2eddf..551594b 100644
@@ -12,6 +12,7 @@
 //===---------------------------------------------------------------------===//
 
 #include "lsan_common.h"
+#include "lsan_thread.h"
 #include "sanitizer_common/sanitizer_platform.h"
 
 #if CAN_SANITIZE_LEAKS && SANITIZER_FUCHSIA
@@ -146,7 +147,7 @@ void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
         // just for the allocator cache, and to call ForEachExtraStackRange,
         // which ASan needs.
         if (flags()->use_stacks) {
-          RunCallbackForEachThreadLocked(
+          GetLsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
               [](ThreadContextBase *tctx, void *arg) {
                 ForEachExtraStackRange(tctx->os_id, ForEachExtraStackRangeCb,
                                        arg);
compiler-rt/lib/lsan/lsan_fuchsia.cpp
index 1bcb748..03ac0af 100644
@@ -68,7 +68,7 @@ void InitializeMainThread() {
 }
 
 void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {
-  RunCallbackForEachThreadLocked(
+  GetLsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
       [](ThreadContextBase *tctx, void *arg) {
         auto ctx = static_cast<ThreadContext *>(tctx);
         static_cast<decltype(caches)>(arg)->push_back(ctx->cache_begin());
compiler-rt/lib/lsan/lsan_thread.cpp
index d04d905..0faff32 100644
@@ -87,9 +87,15 @@ ThreadRegistry *GetLsanThreadRegistryLocked() {
   return thread_registry;
 }
 
-void RunCallbackForEachThreadLocked(
-    __sanitizer::ThreadRegistry::ThreadCallback cb, void *arg) {
-  GetLsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(cb, arg);
+void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
+  GetLsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+      [](ThreadContextBase *tctx, void *threads) {
+        if (tctx->status == ThreadStatusRunning) {
+          reinterpret_cast<InternalMmapVector<tid_t> *>(threads)->push_back(
+              tctx->os_id);
+        }
+      },
+      threads);
 }
 
 }  // namespace __lsan
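
A note on the pattern shared by the ASan and LSan implementations above: ThreadRegistry::RunCallbackForEachThreadLocked takes a plain function-pointer callback plus a void* state argument, so the lambdas must stay capture-free and pass their output vector through the void* parameter, which is why each body starts with a cast. A generic, standalone illustration of that constraint:

    // Standalone illustration; `Callback` mirrors a function-pointer callback
    // API, so only capture-free lambdas (which convert to function pointers)
    // can be passed, and state travels through the opaque `arg`.
    #include <vector>

    using Callback = void (*)(int value, void *arg);

    static void ForEach(const std::vector<int> &values, Callback cb, void *arg) {
      for (int v : values) cb(v, arg);
    }

    static void CollectPositive(const std::vector<int> &values,
                                std::vector<int> *out) {
      ForEach(
          values,
          [](int v, void *arg) {
            // Cast the opaque state back to the output vector, as the
            // sanitizer callbacks above do.
            if (v > 0)
              static_cast<std::vector<int> *>(arg)->push_back(v);
          },
          out);
    }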