typedef unsigned long uptr; // NOLINT
const uptr kPageSize = 4096;
-const int kTidBits = 15;
+const int kTidBits = 13;
const unsigned kMaxTid = 1 << kTidBits;
const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
const int kClkBits = 40;
MBlockSync,
MBlockClock,
MBlockThreadContex,
+ MBlockDeadInfo,
MBlockRacyStacks,
MBlockRacyAddresses,
MBlockAtExit,
if (*tls_addr > *stk_addr && *tls_addr < *stk_addr + *stk_size) {
CHECK_GT(*tls_addr + *tls_size, *stk_addr);
CHECK_LE(*tls_addr + *tls_size, *stk_addr + *stk_size);
- *stk_size = *tls_addr - *stk_addr;
- *stk_size = RoundUp(*stk_size, kPageSize);
- uptr stk_end = *stk_addr + *stk_size;
- if (stk_end > *tls_addr) {
- *tls_size -= *tls_addr - stk_end;
- *tls_addr = stk_end;
- }
+ *stk_size -= *tls_size;
+ *tls_addr = *stk_addr + *stk_size;
}
}
, reuse_count()
, epoch0()
, epoch1()
+ , dead_info()
, dead_next() {
}
// As if 8-byte write by thread 0xff..f at epoch 0xff..f, races with everything.
const u64 kShadowFreed = 0xfffffffffffffff8ull;
-const int kSigCount = 1024;
+const int kSigCount = 128;
const int kShadowStackSize = 1024;
struct my_siginfo_t {
u64 epoch0;
u64 epoch1;
StackTrace creation_stack;
- ThreadDeadInfo dead_info;
- ThreadContext* dead_next; // In dead thread list.
+ ThreadDeadInfo *dead_info;
+ ThreadContext *dead_next; // In dead thread list.
explicit ThreadContext(int tid);
};
trace = &tctx->thr->trace;
} else if (tctx->status == ThreadStatusFinished
|| tctx->status == ThreadStatusDead) {
- trace = &tctx->dead_info.trace;
+ if (tctx->dead_info == 0)
+ return;
+ trace = &tctx->dead_info->trace;
} else {
return;
}
namespace __tsan {
-const int kThreadQuarantineSize = 100;
+const int kThreadQuarantineSize = 16;
static void MaybeReportThreadLeak(ThreadContext *tctx) {
if (tctx->detached)
tctx->status = ThreadStatusInvalid;
tctx->reuse_count++;
tid = tctx->tid;
- // The point to reclain dead_info.
- // delete tctx->dead_info;
+ DestroyAndFree(tctx->dead_info);
} else {
StatInc(thr, StatThreadMaxTid);
tid = ctx->thread_seq++;
}
  // Save info about the thread.
- // If dead_info will become dynamically allocated again,
- // it is the point to allocate it.
- // tctx->dead_info = new ThreadDeadInfo;
- internal_memcpy(&tctx->dead_info.trace.events[0],
+ tctx->dead_info = new(internal_alloc(MBlockDeadInfo, sizeof(ThreadDeadInfo)))
+ ThreadDeadInfo();
+ internal_memcpy(&tctx->dead_info->trace.events[0],
&thr->trace.events[0], sizeof(thr->trace.events));
for (int i = 0; i < kTraceParts; i++) {
- tctx->dead_info.trace.headers[i].stack0.CopyFrom(
+ tctx->dead_info->trace.headers[i].stack0.CopyFrom(
thr->trace.headers[i].stack0);
}
tctx->epoch1 = thr->clock.get(tctx->tid);
namespace __tsan {
const int kTraceParts = 8;
-const int kTraceSize = 1024*1024;
+const int kTraceSize = 128*1024;
const int kTracePartSize = kTraceSize / kTraceParts;
// Must fit into 3 bits.
pthread_attr_t attr;
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, detached);
+ pthread_attr_setstacksize(&attr, 64*1024);
pthread_create(&impl_->thread, &attr,
ScopedThread::Impl::ScopedThreadCallback, impl_);
}