void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}
-void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
- void *arg) {
+// Appends the [begin, end) range of every live fake-stack frame belonging to
+// the thread identified by `os_id` onto `ranges`. Does nothing when the
+// thread is unknown or has no fake stack.
+void GetThreadExtraStackRangesLocked(tid_t os_id,
+                                     InternalMmapVector<Range> *ranges) {
__asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
if (!t)
return;
__asan::FakeStack *fake_stack = t->get_fake_stack();
if (!fake_stack)
return;
-  fake_stack->ForEachFakeFrame(callback, arg);
+
+  fake_stack->ForEachFakeFrame(
+      [](uptr begin, uptr end, void *arg) {
+        // Plain aggregate initialization; the members of Range are declared
+        // in (begin, end) order.
+        reinterpret_cast<InternalMmapVector<Range> *>(arg)->push_back(
+            {begin, end});
+      },
+      ranges);
+}
+
+// Collects extra (fake) stack ranges of every registered thread into
+// `ranges` by dispatching to the per-thread overload for each entry in the
+// ASan thread registry. Caller must hold the registry lock.
+void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {
+  GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+      [](ThreadContextBase *tctx, void *arg) {
+        GetThreadExtraStackRangesLocked(
+            tctx->os_id, reinterpret_cast<InternalMmapVector<Range> *>(arg));
+      },
+      ranges);
+}
void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}
-void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
- void *arg) {}
+// Stubs: this configuration has no extra (fake) stack ranges to report.
+void GetThreadExtraStackRangesLocked(tid_t os_id,
+                                     InternalMmapVector<Range> *ranges) {}
+void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {}
void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {}
void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {}
}
}
-void ForEachExtraStackRangeCb(uptr begin, uptr end, void *arg) {
-  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
-  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
+// Feeds every pre-collected extra stack range to the pointer scanner,
+// marking anything found as reachable.
+void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
+                          Frontier *frontier) {
+  for (const Range &r : ranges)
+    ScanRangeForPointers(r.begin, r.end, frontier, "FAKE STACK", kReachable);
}
# if SANITIZER_FUCHSIA
Frontier *frontier, tid_t caller_tid,
uptr caller_sp) {
InternalMmapVector<uptr> registers;
+ InternalMmapVector<Range> extra_ranges;
for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
LOG_THREADS("Processing thread %llu.\n", os_id);
}
ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
kReachable);
- ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
+ extra_ranges.clear();
+ GetThreadExtraStackRangesLocked(os_id, &extra_ranges);
+ ScanExtraStackRanges(extra_ranges, frontier);
}
if (flags()->use_tls) {
kIgnoreObjectInvalid
};
+// A [begin, end) address range of an extra (fake) stack frame to be scanned
+// for reachable pointers.
+struct Range {
+  uptr begin;
+  uptr end;
+};
+
//// --------------------------------------------------------------------------
//// Poisoning prototypes.
//// --------------------------------------------------------------------------
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls);
void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches);
-void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
- void *arg);
+void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges);
+void GetThreadExtraStackRangesLocked(tid_t os_id,
+ InternalMmapVector<Range> *ranges);
void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs);
void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads);
InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions();
void ScanRootRegion(Frontier *frontier, RootRegion const &region,
uptr region_begin, uptr region_end, bool is_readable);
-void ForEachExtraStackRangeCb(uptr begin, uptr end, void *arg);
// Run stoptheworld while holding any platform-specific locks, as well as the
// allocator and thread registry locks.
void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
Frontier *frontier,
const char *region_type, ChunkTag tag);
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);
+// Scans the given pre-collected extra stack ranges. NOTE: the parameter list
+// must match the definition, which takes the collected ranges — not a
+// thread id.
+void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
+                          Frontier *frontier);
// Functions called from the parent tool.
const char *MaybeCallLsanDefaultOptions();
// We don't use the thread registry at all for enumerating the threads
// and their stacks, registers, and TLS regions. So use it separately
- // just for the allocator cache, and to call ForEachExtraStackRange,
+ // just for the allocator cache, and to call ScanExtraStackRanges,
// which ASan needs.
if (flags()->use_stacks) {
- GetLsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
- [](ThreadContextBase *tctx, void *arg) {
- ForEachExtraStackRange(tctx->os_id, ForEachExtraStackRangeCb,
- arg);
- },
- &params->argument->frontier);
+ InternalMmapVector<Range> ranges;
+ GetThreadExtraStackRangesLocked(&ranges);
+ ScanExtraStackRanges(ranges, &params->argument->frontier);
}
-
params->callback(SuspendedThreadsListFuchsia(), params->argument);
},
&params);
///// Interface to the common LSan module. /////
-void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
- void *arg) {}
+// Stubs: this platform reports no extra (fake) stack ranges to LSan.
+void GetThreadExtraStackRangesLocked(tid_t os_id,
+                                     InternalMmapVector<Range> *ranges) {}
+void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {}
void LockThreadRegistry() { thread_registry->Lock(); }