From c1efa64c631c41579ea7dea3c5b0b7d46403e5ba Mon Sep 17 00:00:00 2001 From: Anna Zaks Date: Mon, 7 Mar 2016 23:16:23 +0000 Subject: [PATCH] [tsan] Add support for pointer typed atomic stores, loads, and cmpxchg TSan instrumentation functions for atomic stores, loads, and cmpxchg work on integer value types. This patch adds casts before calling TSan instrumentation functions in cases where the value is a pointer. Differential Revision: http://reviews.llvm.org/D17833 llvm-svn: 262876 --- .../Transforms/Instrumentation/ThreadSanitizer.cpp | 39 +++++++++++++++++----- .../test/Instrumentation/ThreadSanitizer/atomic.ll | 35 +++++++++++++++++++ 2 files changed, 66 insertions(+), 8 deletions(-) diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp index 9331e1d..e73a4b2 100644 --- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp @@ -496,6 +496,11 @@ bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) { return false; } +static Value *createIntOrPtrToIntCast(Value *V, Type* Ty, IRBuilder<> &IRB) { + return isa<PointerType>(V->getType()) ? + IRB.CreatePtrToInt(V, Ty) : IRB.CreateIntCast(V, Ty, false); +} + // Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x // standards. For background see C++11 standard. 
A slightly older, publicly // available draft of the standard (not entirely up-to-date, but close enough @@ -517,9 +522,16 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) { Type *PtrTy = Ty->getPointerTo(); Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy), createOrdering(&IRB, LI->getOrdering())}; - CallInst *C = CallInst::Create(TsanAtomicLoad[Idx], Args); - ReplaceInstWithInst(I, C); - + Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType(); + if (Ty == OrigTy) { + Instruction *C = CallInst::Create(TsanAtomicLoad[Idx], Args); + ReplaceInstWithInst(I, C); + } else { + // We are loading a pointer, so we need to cast the return value. + Value *C = IRB.CreateCall(TsanAtomicLoad[Idx], Args); + Instruction *Cast = CastInst::Create(Instruction::IntToPtr, C, OrigTy); + ReplaceInstWithInst(I, Cast); + } } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) { Value *Addr = SI->getPointerOperand(); int Idx = getMemoryAccessFuncIndex(Addr, DL); @@ -530,7 +542,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) { Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize); Type *PtrTy = Ty->getPointerTo(); Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy), - IRB.CreateIntCast(SI->getValueOperand(), Ty, false), + createIntOrPtrToIntCast(SI->getValueOperand(), Ty, IRB), createOrdering(&IRB, SI->getOrdering())}; CallInst *C = CallInst::Create(TsanAtomicStore[Idx], Args); ReplaceInstWithInst(I, C); @@ -560,15 +572,26 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) { const unsigned BitSize = ByteSize * 8; Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize); Type *PtrTy = Ty->getPointerTo(); + Value *CmpOperand = + createIntOrPtrToIntCast(CASI->getCompareOperand(), Ty, IRB); + Value *NewOperand = + createIntOrPtrToIntCast(CASI->getNewValOperand(), Ty, IRB); Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy), - IRB.CreateIntCast(CASI->getCompareOperand(), Ty, false), - 
IRB.CreateIntCast(CASI->getNewValOperand(), Ty, false), + CmpOperand, + NewOperand, createOrdering(&IRB, CASI->getSuccessOrdering()), createOrdering(&IRB, CASI->getFailureOrdering())}; CallInst *C = IRB.CreateCall(TsanAtomicCAS[Idx], Args); - Value *Success = IRB.CreateICmpEQ(C, CASI->getCompareOperand()); + Value *Success = IRB.CreateICmpEQ(C, CmpOperand); + Value *OldVal = C; + Type *OrigOldValTy = CASI->getNewValOperand()->getType(); + if (Ty != OrigOldValTy) { + // The value is a pointer, so we need to cast the return value. + OldVal = IRB.CreateIntToPtr(C, OrigOldValTy); + } - Value *Res = IRB.CreateInsertValue(UndefValue::get(CASI->getType()), C, 0); + Value *Res = + IRB.CreateInsertValue(UndefValue::get(CASI->getType()), OldVal, 0); Res = IRB.CreateInsertValue(Res, Success, 1); I->replaceAllUsesWith(Res); diff --git a/llvm/test/Instrumentation/ThreadSanitizer/atomic.ll b/llvm/test/Instrumentation/ThreadSanitizer/atomic.ll index a10ca6c..71dca94 100644 --- a/llvm/test/Instrumentation/ThreadSanitizer/atomic.ll +++ b/llvm/test/Instrumentation/ThreadSanitizer/atomic.ll @@ -1186,6 +1186,16 @@ entry: ; CHECK-LABEL: atomic64_load_seq_cst ; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 5), !dbg +define i8* @atomic64_load_seq_cst_ptr_ty(i8** %a) nounwind uwtable { +entry: + %0 = load atomic i8*, i8** %a seq_cst, align 8, !dbg !7 + ret i8* %0, !dbg !7 +} +; CHECK-LABEL: atomic64_load_seq_cst +; CHECK: bitcast i8** %{{.+}} to i64* +; CHECK-NEXT: call i64 @__tsan_atomic64_load(i64* %{{.+}}, i32 5), !dbg +; CHECK-NEXT: inttoptr i64 %{{.+}} to i8* + define void @atomic64_store_unordered(i64* %a) nounwind uwtable { entry: store atomic i64 0, i64* %a unordered, align 8, !dbg !7 @@ -1218,6 +1228,16 @@ entry: ; CHECK-LABEL: atomic64_store_seq_cst ; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 5), !dbg +define void @atomic64_store_seq_cst_ptr_ty(i8** %a, i8* %v) nounwind uwtable { +entry: + store atomic i8* %v, i8** %a seq_cst, align 8, !dbg !7 + ret void, 
!dbg !7 +} +; CHECK-LABEL: atomic64_store_seq_cst +; CHECK: %{{.*}} = bitcast i8** %{{.*}} to i64* +; CHECK-NEXT: %{{.*}} = ptrtoint i8* %{{.*}} to i64 +; CHECK-NEXT: call void @__tsan_atomic64_store(i64* %{{.*}}, i64 %{{.*}}, i32 5), !dbg + define void @atomic64_xchg_monotonic(i64* %a) nounwind uwtable { entry: atomicrmw xchg i64* %a, i64 0 monotonic, !dbg !7 @@ -1538,6 +1558,21 @@ entry: ; CHECK-LABEL: atomic64_cas_seq_cst ; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 5, i32 5), !dbg +define void @atomic64_cas_seq_cst_ptr_ty(i8** %a, i8* %v1, i8* %v2) nounwind uwtable { +entry: + cmpxchg i8** %a, i8* %v1, i8* %v2 seq_cst seq_cst, !dbg !7 + ret void +} +; CHECK-LABEL: atomic64_cas_seq_cst +; CHECK: {{.*}} = ptrtoint i8* %v1 to i64 +; CHECK-NEXT: {{.*}} = ptrtoint i8* %v2 to i64 +; CHECK-NEXT: {{.*}} = bitcast i8** %a to i64* +; CHECK-NEXT: {{.*}} = call i64 @__tsan_atomic64_compare_exchange_val(i64* {{.*}}, i64 {{.*}}, i64 {{.*}}, i32 5, i32 5), !dbg +; CHECK-NEXT: {{.*}} = icmp eq i64 +; CHECK-NEXT: {{.*}} = inttoptr i64 {{.*}} to i8* +; CHECK-NEXT: {{.*}} = insertvalue { i8*, i1 } undef, i8* {{.*}}, 0 +; CHECK-NEXT: {{.*}} = insertvalue { i8*, i1 } {{.*}}, i1 {{.*}}, 1 + define i128 @atomic128_load_unordered(i128* %a) nounwind uwtable { entry: %0 = load atomic i128, i128* %a unordered, align 16, !dbg !7 -- 2.7.4