__tsan_unaligned*
__tsan_release
__tsan_acquire
+__tsan_memcpy
+__tsan_memmove
+__tsan_memset
__tsan_mutex_create
__tsan_mutex_destroy
__tsan_mutex_pre_lock
// Layout of the test-only barrier word: the low kBarrierThreadBits bits
// hold the participant count (barrier_init rejects num_threads >=
// kBarrierThreads below), and the bits above appear to hold an
// epoch/round counter that waiters advance in kBarrierThreads-sized
// increments. NOTE(review): confirm the epoch interpretation against the
// full barrier implementation — only fragments are visible here.
constexpr u32 kBarrierThreadBits = 10;
constexpr u32 kBarrierThreads = 1 << kBarrierThreadBits;
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_init(
+extern "C" {
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_init(
atomic_uint32_t *barrier, u32 num_threads) {
if (num_threads >= kBarrierThreads) {
Printf("barrier_init: count is too large (%d)\n", num_threads);
return (value >> kBarrierThreadBits) / (value & (kBarrierThreads - 1));
}
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
atomic_uint32_t *barrier) {
u32 old = atomic_fetch_add(barrier, kBarrierThreads, memory_order_relaxed);
u32 old_epoch = barrier_epoch(old);
FutexWait(barrier, cur);
}
}
+
+// Runtime entry point for memcpy calls rewritten by the TSan
+// instrumentation pass (see the MemcpyFn registration in the LLVM pass).
+// Delegates to the sanitizer_common interceptor implementation, which
+// performs the copy on the caller's behalf.
+void *__tsan_memcpy(void *dst, const void *src, uptr size) {
+  void *ctx;
+#if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE
+  COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
+#else
+  // Platforms where memcpy and memmove are the same routine: go through
+  // the memmove path so potentially overlapping copies stay correct.
+  COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+#endif
+  // NOTE(review): no explicit return — the *_IMPL macros presumably
+  // expand to a statement that returns dst; confirm in
+  // sanitizer_common_interceptors.inc.
+}
+
+// Runtime entry point for memset calls rewritten by the TSan
+// instrumentation pass; delegates to the sanitizer_common interceptor
+// implementation, which fills the region and supplies the return value
+// (the *_IMPL macro ends the function — no explicit return here).
+void *__tsan_memset(void *dst, int c, uptr size) {
+  void *ctx;
+  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, c, size);
+}
+
+// Runtime entry point for memmove calls rewritten by the TSan
+// instrumentation pass; delegates to the sanitizer_common interceptor
+// implementation, which handles overlapping source/destination regions
+// and supplies the return value (the *_IMPL macro ends the function).
+void *__tsan_memmove(void *dst, const void *src, uptr size) {
+  void *ctx;
+  COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+}
+}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_vptr_update(void **vptr_p, void *new_val);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_memcpy(void *dest, const void *src, uptr count);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_memset(void *dest, int ch, uptr count);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_memmove(void *dest, const void *src, uptr count);
+
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_entry(void *call_pc);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_exit();
// CHECK: WARNING: ThreadSanitizer: data race
// CHECK: Write of size 8 at {{.*}} by thread T1:
-// CHECK: #0 memset
+// CHECK: #0 {{.*}}memset
// CHECK: #{{[12]}} Thread
// CHECK-NOT: bad PC passed to __tsan_symbolize_external
// CHECK-NOT: __sanitizer_report_error_summary
}
// CHECK: WARNING: ThreadSanitizer: data race
-// CHECK: #0 memset
+// CHECK: #0 {{.*}}memset
// CHECK: #{{[12]}} MemSetThread
// CHECK: Previous write
-// CHECK: #0 {{(memcpy|memmove)}}
+// CHECK: #0 {{.*mem(cpy|move)}}
// CHECK: #{{[12]}} MemCpyThread
}
// CHECK: WARNING: ThreadSanitizer: data race
-// CHECK: #0 memset
+// CHECK: #0 {{.*}}memset
// CHECK: #{{[12]}} MemSetThread
// CHECK: Previous write
-// CHECK: #0 {{(memcpy|memmove)}}
+// CHECK: #0 {{.*mem(cpy|move)}}
// CHECK: #{{[12]}} MemMoveThread
// CHECK: addr=[[ADDR:0x[0-9,a-f]+]]
// CHECK: WARNING: ThreadSanitizer: data race
// CHECK: Write of size 3 at [[ADDR]] by thread T2:
-// CHECK: #0 {{(memcpy|memmove)}}
+// CHECK: #0 {{.*mem(cpy|move)}}
// CHECK: #{{[12]}} Thread2
// CHECK: Previous read of size 1 at [[ADDR]] by thread T1:
-// CHECK: #0 memcmp
+// CHECK: #0 {{.*}}memcmp
// CHECK: #{{[12]}} Thread1
// CHECK: addr2=[[ADDR2:0x[0-9,a-f]+]]
// CHECK: WARNING: ThreadSanitizer: data race
// CHECK: Write of size 4 at [[ADDR1]] by thread T2:
-// CHECK: #0 {{(memcpy|memmove)}}
+// CHECK: #0 {{.*mem(cpy|move)}}
// CHECK: #{{[12]}} Thread2
// CHECK: Previous write of size 1 at [[ADDR2]] by thread T1:
-// CHECK: #0 {{(memcpy|memmove)}}
+// CHECK: #0 {{.*mem(cpy|move)}}
// CHECK: #{{[12]}} Thread1
}
MemmoveFn =
- M.getOrInsertFunction("memmove", Attr, IRB.getInt8PtrTy(),
+ M.getOrInsertFunction("__tsan_memmove", Attr, IRB.getInt8PtrTy(),
IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
MemcpyFn =
- M.getOrInsertFunction("memcpy", Attr, IRB.getInt8PtrTy(),
+ M.getOrInsertFunction("__tsan_memcpy", Attr, IRB.getInt8PtrTy(),
IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
MemsetFn =
- M.getOrInsertFunction("memset", Attr, IRB.getInt8PtrTy(),
+ M.getOrInsertFunction("__tsan_memset", Attr, IRB.getInt8PtrTy(),
IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy);
}
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %x, i8* align 4 %y, i64 16, i1 false)
ret void
; CHECK: define void @MemCpyTest
-; CHECK: call i8* @memcpy
+; CHECK: call i8* @__tsan_memcpy
; CHECK: ret void
}
tail call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 4 %x, i8* align 4 %y, i64 16, i1 false)
ret void
; CHECK: define void @MemCpyInlineTest
-; CHECK: call i8* @memcpy
+; CHECK: call i8* @__tsan_memcpy
; CHECK: ret void
}
tail call void @llvm.memmove.p0i8.p0i8.i64(i8* align 4 %x, i8* align 4 %y, i64 16, i1 false)
ret void
; CHECK: define void @MemMoveTest
-; CHECK: call i8* @memmove
+; CHECK: call i8* @__tsan_memmove
; CHECK: ret void
}
tail call void @llvm.memset.p0i8.i64(i8* align 4 %x, i8 77, i64 16, i1 false)
ret void
; CHECK: define void @MemSetTest
-; CHECK: call i8* @memset
+; CHECK: call i8* @__tsan_memset
; CHECK: ret void
}
tail call void @llvm.memset.inline.p0i8.i64(i8* align 4 %x, i8 77, i64 16, i1 false)
ret void
; CHECK: define void @MemSetInlineTest
-; CHECK: call i8* @memset
+; CHECK: call i8* @__tsan_memset
; CHECK: ret void
}