tsan: even when report_atomic_races=false, still report races between atomic accesses and free().
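
For illustration (report_atomic_races is the existing flag; setting it via
TSAN_OPTIONS at run time is the assumed usage):
  TSAN_OPTIONS=report_atomic_races=0 ./a.out
still reports a race where one of the two accesses is a free()/delete (or a
pthread_mutex_destroy), and only suppresses reports where one of the accesses
is atomic and neither is a free().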
llvm-svn: 174175
--- /dev/null
+// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
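+// Tests that a race between an atomic access and a subsequent free() of the
+// same memory is reported as a data race.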
+#include <pthread.h>
+#include <unistd.h>
+
+void *Thread(void *a) {
+ __atomic_fetch_add((int*)a, 1, __ATOMIC_SEQ_CST);
+ return 0;
+}
+
+int main() {
+ int *a = new int(0);
+ pthread_t t;
+ pthread_create(&t, 0, Thread, a);
+ sleep(1);
+ delete a;
+ pthread_join(t, 0);
+}
+
+// CHECK: WARNING: ThreadSanitizer: data race
--- /dev/null
+// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
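+// Tests that an atomic access to already freed memory is reported as a
+// heap-use-after-free.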
+#include <pthread.h>
+#include <unistd.h>
+
+void *Thread(void *a) {
+ sleep(1);
+ __atomic_fetch_add((int*)a, 1, __ATOMIC_SEQ_CST);
+ return 0;
+}
+
+int main() {
+ int *a = new int(0);
+ pthread_t t;
+ pthread_create(&t, 0, Thread, a);
+ delete a;
+ pthread_join(t, 0);
+}
+
+// CHECK: WARNING: ThreadSanitizer: heap-use-after-free
--- /dev/null
+// RUN: %clang_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
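+// Tests that a race between lock/unlock in one thread and
+// pthread_mutex_destroy in another is reported as a data race.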
+#include <pthread.h>
+#include <stdio.h>
+#include <stddef.h>
+#include <unistd.h>
+
+void *Thread(void *x) {
+ pthread_mutex_lock((pthread_mutex_t*)x);
+ pthread_mutex_unlock((pthread_mutex_t*)x);
+ return 0;
+}
+
+int main() {
+ pthread_mutex_t Mtx;
+ pthread_mutex_init(&Mtx, 0);
+ pthread_t t;
+ pthread_create(&t, 0, Thread, &Mtx);
+ sleep(1);
+ pthread_mutex_destroy(&Mtx);
+ pthread_join(t, 0);
+ return 0;
+}
+
+// CHECK: WARNING: ThreadSanitizer: data race
// This fast-path is critical for performance.
// Assume the access is atomic.
if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) {
- if (flags()->report_atomic_races)
- MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
return *a;
}
SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
T v = *a;
s->mtx.ReadUnlock();
__sync_synchronize();
- if (flags()->report_atomic_races)
- MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
return v;
}
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
morder mo) {
CHECK(IsStoreOrder(mo));
- if (flags()->report_atomic_races)
- MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
// This fast-path is critical for performance.
// Assume the access is atomic.
// Strictly saying even relaxed store cuts off release sequence,
template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
- if (flags()->report_atomic_races)
- MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
thr->clock.set(thr->tid, thr->fast_state.epoch());
if (IsAcqRelOrder(mo))
static bool AtomicCAS(ThreadState *thr, uptr pc,
volatile T *a, T *c, T v, morder mo, morder fmo) {
(void)fmo; // Unused because llvm does not pass it yet.
- if (flags()->report_atomic_races)
- MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
thr->clock.set(thr->tid, thr->fast_state.epoch());
if (IsAcqRelOrder(mo))
}
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
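+ // Mark the accesses done for the freed range as freeing, so that ReportRace
+ // reports a race with a concurrent atomic access even when
+ // report_atomic_races=0.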
+ CHECK_EQ(thr->is_freeing, false);
+ thr->is_freeing = true;
MemoryAccessRange(thr, pc, addr, size, true);
+ thr->is_freeing = false;
Shadow s(thr->fast_state);
s.ClearIgnoreBit();
s.MarkAsFreed();
x_ |= kFreedBit;
}
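+ // Returns whether the kFreedBit is set, i.e. the access was done as part of
+ // freeing the memory.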
+ bool IsFreed() const {
+ return x_ & kFreedBit;
+ }
+
bool GetFreedAndReset() {
bool res = x_ & kFreedBit;
x_ &= ~kFreedBit;
int in_rtl;
bool in_symbolizer;
bool is_alive;
+ bool is_freeing;
const uptr stk_addr;
const uptr stk_size;
const uptr tls_addr;
CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
StatInc(thr, StatMutexCreate);
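+ // The write below is done with is_freeing set, so that a race with a
+ // concurrent atomic access to the mutex is reported even when
+ // report_atomic_races=0.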
- if (!linker_init && IsAppMem(addr))
+ if (!linker_init && IsAppMem(addr)) {
+ CHECK(!thr->is_freeing);
+ thr->is_freeing = true;
MemoryWrite(thr, pc, addr, kSizeLog1);
+ thr->is_freeing = false;
+ }
SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
s->is_rw = rw;
s->is_recursive = recursive;
SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr);
if (s == 0)
return;
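+ // As in MutexCreate, mark the write as freeing so that a race with a
+ // concurrent lock/unlock (an atomic access) is still reported with
+ // report_atomic_races=0.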
- if (IsAppMem(addr))
+ if (IsAppMem(addr)) {
+ CHECK(!thr->is_freeing);
+ thr->is_freeing = true;
MemoryWrite(thr, pc, addr, kSizeLog1);
+ thr->is_freeing = false;
+ }
if (flags()->report_destroy_locked
&& s->owner_tid != SyncVar::kInvalidTid
&& !s->is_broken) {
return false;
}
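+// Returns true if the race must be reported regardless of report_atomic_races:
+// either neither of the two accesses is atomic, or one is atomic and the
+// other is (part of) a free().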
+static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
+ Shadow s0(thr->racy_state[0]);
+ Shadow s1(thr->racy_state[1]);
+ CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
+ if (!s0.IsAtomic() && !s1.IsAtomic())
+ return true;
+ if (s0.IsAtomic() && s1.IsFreed())
+ return true;
+ if (s1.IsAtomic() && thr->is_freeing)
+ return true;
+ return false;
+}
+
void ReportRace(ThreadState *thr) {
if (!flags()->report_bugs)
return;
ScopedInRtl in_rtl;
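+ // With report_atomic_races=0, suppress only races that involve an atomic
+ // access and no free().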
+ if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
+ return;
+
if (thr->in_signal_handler)
Printf("ThreadSanitizer: printing report from signal handler."
" Can crash or hang.\n");