HeapAllocationsRingBuffer *heap_allocations_;
StackAllocationsRingBuffer *stack_allocations_;
- Thread *next_; // All live threads form a linked list.
-
u64 unique_id_; // counting from zero.
u32 tagging_disabled_; // if non-zero, malloc uses zero tag in this thread.
return 0;
}
-struct ThreadListHead {
- Thread *list_;
-
- ThreadListHead() : list_(nullptr) {}
-
- void Push(Thread *t) {
- t->next_ = list_;
- list_ = t;
- }
-
- Thread *Pop() {
- Thread *t = list_;
- if (t)
- list_ = t->next_;
- return t;
- }
-
- void Remove(Thread *t) {
- Thread **cur = &list_;
- while (*cur != t) cur = &(*cur)->next_;
- CHECK(*cur && "thread not found");
- *cur = (*cur)->next_;
- }
-
- template <class CB>
- void ForEach(CB cb) {
- Thread *t = list_;
- while (t) {
- cb(t);
- t = t->next_;
- }
- }
-};
-
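The removed ThreadListHead is an intrusive LIFO list: Push and Pop touch only the head, while Remove walks the next_ chain. The rest of the patch maps those operations onto InternalMmapVector. A minimal sketch of the correspondence, using std::vector as a stand-in for InternalMmapVector (illustration only, not part of the patch):

    #include <vector>

    struct Thread;  // stand-in for the hwasan Thread type

    std::vector<Thread *> free_list;

    // Push(t) becomes push_back(t).
    void Push(Thread *t) { free_list.push_back(t); }

    // Pop() becomes back() + pop_back(), guarded by empty().
    Thread *Pop() {
      if (free_list.empty()) return nullptr;
      Thread *t = free_list.back();
      free_list.pop_back();
      return t;
    }

    // Remove(t) becomes a linear scan plus erase(); see
    // RemoveThreadFromLiveList further down.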
struct ThreadStats {
uptr n_live_threads;
uptr total_stack_size;
Thread *t;
{
SpinMutexLock l(&list_mutex_);
- t = free_list_.Pop();
- if (t) {
+ if (!free_list_.empty()) {
+ t = free_list_.back();
+ free_list_.pop_back();
uptr start = (uptr)t - ring_buffer_size_;
internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
} else {
t = AllocThread();
}
- live_list_.Push(t);
+ live_list_.push_back(t);
}
t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_);
AddThreadStats(t);
ReleaseMemoryPagesToOS(start, start + thread_alloc_size_);
}
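+ // Unlinks t from live_list_ with a linear scan; the caller must hold
+ // list_mutex_ (ReleaseThread below takes it before calling).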
+ void RemoveThreadFromLiveList(Thread *t) {
+ for (Thread *&t2 : live_list_)
+ if (t2 == t) {
+ live_list_.erase(&t2);
+ return;
+ }
+ CHECK(0 && "thread not found in live list");
+ }
+
void ReleaseThread(Thread *t) {
RemoveThreadStats(t);
t->Destroy();
SpinMutexLock l(&list_mutex_);
- live_list_.Remove(t);
- free_list_.Push(t);
+ RemoveThreadFromLiveList(t);
+ free_list_.push_back(t);
DontNeedThread(t);
}
template <class CB>
void VisitAllLiveThreads(CB cb) {
SpinMutexLock l(&list_mutex_);
- live_list_.ForEach(cb);
+ for (Thread *t : live_list_) cb(t);
}
void AddThreadStats(Thread *t) {
uptr ring_buffer_size_;
uptr thread_alloc_size_;
- ThreadListHead free_list_;
- ThreadListHead live_list_;
+ InternalMmapVector<Thread *> free_list_;
+ InternalMmapVector<Thread *> live_list_;
SpinMutex list_mutex_;
ThreadStats stats_;
Swap(size_, other.size_);
}
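+ // Removes *t by copying the tail down one slot; element order is
+ // preserved, but any pointers at or past t are invalidated.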
+ void erase(T *t) {
+ if (t + 1 < end())
+ internal_memmove(t, t + 1, (end() - t - 1) * sizeof(T));
+ --size_;
+ }
+
private:
void Realloc(uptr new_capacity) {
CHECK_GT(new_capacity, 0);
CHECK_EQ(v.capacity(), GetPageSizeCached() / sizeof(uptr));
}
-TEST(SanitizerCommon, InternalMmapVectorReize) {
+TEST(SanitizerCommon, InternalMmapVectorResize) {
InternalMmapVector<uptr> v;
CHECK_EQ(0U, v.size());
CHECK_GE(v.capacity(), v.size());
EXPECT_EQ(vector1, vector4);
}
+TEST(SanitizerCommon, InternalMmapVectorErase) {
+ InternalMmapVector<uptr> v;
+ std::vector<uptr> r;
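+ // Mirror every erase on a std::vector and compare element-by-element.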
+ for (uptr i = 0; i < 10; i++) {
+ v.push_back(i);
+ r.push_back(i);
+ }
+
+ v.erase(&v[9]);
+ r.erase(r.begin() + 9);
+ EXPECT_EQ(r.size(), v.size());
+ for (uptr i = 0; i < r.size(); i++) EXPECT_EQ(r[i], v[i]);
+
+ v.erase(&v[3]);
+ r.erase(r.begin() + 3);
+ EXPECT_EQ(r.size(), v.size());
+ for (uptr i = 0; i < r.size(); i++) EXPECT_EQ(r[i], v[i]);
+
+ v.erase(&v[0]);
+ r.erase(r.begin());
+ EXPECT_EQ(r.size(), v.size());
+ for (uptr i = 0; i < r.size(); i++) EXPECT_EQ(r[i], v[i]);
+}
+
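As a self-contained illustration of the shift-down erase exercised by the test above, here is the same technique on a plain array (erase_at is a hypothetical helper, independent of sanitizer_common):

    #include <cstdio>
    #include <cstring>

    // Remove *p from a trivially-copyable array [p0, end) by moving the
    // tail left one slot, mirroring what InternalMmapVector::erase does
    // with internal_memmove.
    template <typename T>
    void erase_at(T *p, T *end, std::size_t &size) {
      if (p + 1 < end)
        std::memmove(p, p + 1, (end - p - 1) * sizeof(T));
      --size;
    }

    int main() {
      int a[] = {0, 1, 2, 3, 4};
      std::size_t n = 5;
      erase_at(&a[1], a + n, n);
      for (std::size_t i = 0; i < n; i++)
        std::printf("%d ", a[i]);  // prints: 0 2 3 4
      std::printf("\n");
    }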
void TestThreadInfo(bool main) {
uptr stk_addr = 0;
uptr stk_size = 0;
--- /dev/null
+// Test that Thread objects are reused.
+// RUN: %clangxx_hwasan -mllvm -hwasan-instrument-stack=0 %s -o %t && %env_hwasan_opts=verbose_threads=1 %run %t 2>&1 | FileCheck %s
+
+#include <assert.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <sanitizer/hwasan_interface.h>
+
+#include "../utils.h"
+
+pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
+
+void *threadfn(void *) {
+ pthread_mutex_lock(UNTAG(&mu));
+ pthread_mutex_unlock(UNTAG(&mu));
+ return nullptr;
+}
+
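+// Holding mu while spawning keeps all N threads alive at once, so the
+// runtime must hand out N distinct Thread objects; after the joins those
+// objects return to the free list for the next round to reuse.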
+void start_stop_threads() {
+ constexpr int N = 4;
+ pthread_t threads[N];
+
+ pthread_mutex_lock(UNTAG(&mu));
+ for (auto &t : threads)
+ pthread_create(&t, nullptr, threadfn, nullptr);
+ pthread_mutex_unlock(UNTAG(&mu));
+
+ for (auto &t : threads)
+ pthread_join(t, nullptr);
+}
+
+int main() {
+ // Cut off the initial threads' creation messages; checking starts here.
+ // CHECK: === test start ===
+ untag_fprintf(stderr, "=== test start ===\n");
+
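+ // The first round records two Thread base addresses as A and B; every
+ // later round must report the same addresses, proving reuse.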
+ // CHECK: Creating : T{{[0-9]+}} [[A:0x[0-9a-f]+]] stack:
+ // CHECK: Creating : T{{[0-9]+}} [[B:0x[0-9a-f]+]] stack:
+ start_stop_threads();
+
+ // CHECK-DAG: Creating : T{{[0-9]+}} [[A]] stack:
+ // CHECK-DAG: Creating : T{{[0-9]+}} [[B]] stack:
+ start_stop_threads();
+
+ // CHECK-DAG: Creating : T{{[0-9]+}} [[A]] stack:
+ // CHECK-DAG: Creating : T{{[0-9]+}} [[B]] stack:
+ start_stop_threads();
+
+ return 0;
+}
// CHECK: in Deallocate
// CHECK: previously allocated here:
// CHECK: in Allocate
- // CHECK: Thread: T2 0x
- // CHECK: Thread: T3 0x
+ // CHECK-DAG: Thread: T2 0x
+ // CHECK-DAG: Thread: T3 0x
// CHECK-DAG: Thread: T0 0x
// CHECK-DAG: Thread: T1 0x
__sync_fetch_and_add(&state, 1);