#include "hwasan_interface_internal.h"
#include "hwasan_mapping.h"
#include "hwasan_poisoning.h"
+#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_placement_new.h"
}
} // namespace __hwasan
+
+// --- Implementation of LSan-specific functions --- {{{1
+namespace __lsan {
+
+// Returns a pointer to the global HWASan thread list, asserting that its
+// live-list mutex is already held by the caller (LSan locks it up front via
+// LockThreadRegistry()).
+static __hwasan::HwasanThreadList *GetHwasanThreadListLocked() {
+  __hwasan::HwasanThreadList &list = __hwasan::hwasanThreadList();
+  list.CheckLocked();
+  return &list;
+}
+
+// Finds the live thread whose OS thread id equals |os_id|; the thread list
+// must already be locked. Returns nullptr when no such thread exists.
+static __hwasan::Thread *GetThreadByOsIDLocked(tid_t os_id) {
+  auto matches_os_id = [os_id](__hwasan::Thread *candidate) {
+    return candidate->os_id() == os_id;
+  };
+  return GetHwasanThreadListLocked()->FindThreadLocked(matches_os_id);
+}
+
+// LSan thread-registry interface: acquires the HWASan thread-list lock.
+void LockThreadRegistry() { __hwasan::hwasanThreadList().Lock(); }
+
+// LSan thread-registry interface: releases the HWASan thread-list lock.
+void UnlockThreadRegistry() { __hwasan::hwasanThreadList().Unlock(); }
+
+// Re-records the current OS thread id on the main thread's descriptor, so
+// that LSan's per-thread lookups by os_id find the main thread. Only acts
+// when called from the main thread itself; a no-op otherwise.
+void EnsureMainThreadIDIsCorrect() {
+  __hwasan::Thread *current = __hwasan::GetCurrentThread();
+  if (!current)
+    return;
+  if (current->IsMainThread())
+    current->set_os_id(GetTid());
+}
+
+// Reports the stack, TLS, allocator-cache, and DTLS ranges of the thread
+// whose OS id is |os_id|, for LSan to treat as root regions. Requires the
+// thread list to be locked. Returns false if no live thread matches.
+bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
+                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
+                           uptr *cache_end, DTLS **dtls) {
+  auto *t = GetThreadByOsIDLocked(os_id);
+  if (!t)
+    return false;
+  *stack_begin = t->stack_bottom();
+  *stack_end = t->stack_top();
+  *tls_begin = t->tls_begin();
+  *tls_end = t->tls_end();
+  // FIXME: an empty allocator-cache range is reported here; confirm this is
+  // correct for HWASan (cf. GetAllThreadAllocatorCachesLocked below, which
+  // is likewise a stub).
+  *cache_begin = 0;
+  *cache_end = 0;
+  *dtls = t->dtls();
+  return true;
+}
+
+// Intentionally a no-op: no per-thread allocator-cache pointers are reported
+// to LSan here (matches the zero cache range in GetThreadRangesLocked).
+void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}
+
+// Intentionally a no-op: no extra stack ranges (e.g. fake stacks) are
+// reported for HWASan threads in this implementation.
+void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
+                            void *arg) {}
+
+// Intentionally a no-op: no additional per-thread context pointers are
+// handed to LSan's root scan.
+void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {}
+// Intentionally a no-op: unsuspended-thread reporting is not implemented
+// for HWASan's LSan integration here.
+void ReportUnsuspendedThreadsLocked(InternalMmapVector<tid_t> *threads) {}
+
+} // namespace __lsan
uptr total_stack_size;
};
-class HwasanThreadList {
+class SANITIZER_MUTEX HwasanThreadList {
public:
HwasanThreadList(uptr storage, uptr size)
: free_space_(storage), free_space_end_(storage + size) {
for (Thread *t : live_list_) cb(t);
}
+  // Returns the first live thread for which |cb| returns true, or nullptr if
+  // none matches. Traverses live_list_, so callers must hold
+  // live_list_mutex_ (see Lock()/CheckLocked() below).
+  template <class CB>
+  Thread *FindThreadLocked(CB cb) SANITIZER_CHECK_LOCKED(live_list_mutex_) {
+    // NOTE: the annotation names live_list_mutex_, not stats_mutex_ —
+    // CheckLocked() asserts live_list_mutex_ and live_list_ is guarded by it.
+    CheckLocked();
+    for (Thread *t : live_list_)
+      if (cb(t))
+        return t;
+    return nullptr;
+  }
+
void AddThreadStats(Thread *t) SANITIZER_EXCLUDES(stats_mutex_) {
SpinMutexLock l(&stats_mutex_);
stats_.n_live_threads++;
uptr GetRingBufferSize() const { return ring_buffer_size_; }
+ // Acquires the live-list mutex; pairs with Unlock(). Used by LSan's
+ // LockThreadRegistry().
+ void Lock() SANITIZER_ACQUIRE(live_list_mutex_) { live_list_mutex_.Lock(); }
+ // Debug-asserts that the live-list mutex is held by the current context.
+ void CheckLocked() const SANITIZER_CHECK_LOCKED(live_list_mutex_) {
+ live_list_mutex_.CheckLocked();
+ }
+ // Releases the live-list mutex acquired by Lock(). Used by LSan's
+ // UnlockThreadRegistry().
+ void Unlock() SANITIZER_RELEASE(live_list_mutex_) {
+ live_list_mutex_.Unlock();
+ }
+
private:
Thread *AllocThread() {
SpinMutexLock l(&free_space_mutex_);