return false;
}
+// Cast a value to the given integer type: pointers are converted with
+// ptrtoint, integers with an unsigned zext/trunc.
+static Value *createIntOrPtrToIntCast(Value *Val, Type *Ty, IRBuilder<> &IRB) {
+  if (Val->getType()->isPointerTy())
+    return IRB.CreatePtrToInt(Val, Ty);
+  return IRB.CreateIntCast(Val, Ty, false);
+}
+
// Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x
// standards. For background see C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
Type *PtrTy = Ty->getPointerTo();
Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
createOrdering(&IRB, LI->getOrdering())};
- CallInst *C = CallInst::Create(TsanAtomicLoad[Idx], Args);
- ReplaceInstWithInst(I, C);
-
+ Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
+ if (Ty == OrigTy) {
+ Instruction *C = CallInst::Create(TsanAtomicLoad[Idx], Args);
+ ReplaceInstWithInst(I, C);
+ } else {
+ // We are loading a pointer, so we need to cast the return value.
+ Value *C = IRB.CreateCall(TsanAtomicLoad[Idx], Args);
+ Instruction *Cast = CastInst::Create(Instruction::IntToPtr, C, OrigTy);
+ ReplaceInstWithInst(I, Cast);
+ }
} else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
Value *Addr = SI->getPointerOperand();
int Idx = getMemoryAccessFuncIndex(Addr, DL);
Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
Type *PtrTy = Ty->getPointerTo();
Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
- IRB.CreateIntCast(SI->getValueOperand(), Ty, false),
+ createIntOrPtrToIntCast(SI->getValueOperand(), Ty, IRB),
createOrdering(&IRB, SI->getOrdering())};
CallInst *C = CallInst::Create(TsanAtomicStore[Idx], Args);
ReplaceInstWithInst(I, C);
const unsigned BitSize = ByteSize * 8;
Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
Type *PtrTy = Ty->getPointerTo();
+ Value *CmpOperand =
+ createIntOrPtrToIntCast(CASI->getCompareOperand(), Ty, IRB);
+ Value *NewOperand =
+ createIntOrPtrToIntCast(CASI->getNewValOperand(), Ty, IRB);
Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
- IRB.CreateIntCast(CASI->getCompareOperand(), Ty, false),
- IRB.CreateIntCast(CASI->getNewValOperand(), Ty, false),
+ CmpOperand,
+ NewOperand,
createOrdering(&IRB, CASI->getSuccessOrdering()),
createOrdering(&IRB, CASI->getFailureOrdering())};
CallInst *C = IRB.CreateCall(TsanAtomicCAS[Idx], Args);
- Value *Success = IRB.CreateICmpEQ(C, CASI->getCompareOperand());
+ Value *Success = IRB.CreateICmpEQ(C, CmpOperand);
+ Value *OldVal = C;
+ Type *OrigOldValTy = CASI->getNewValOperand()->getType();
+ if (Ty != OrigOldValTy) {
+ // The value is a pointer, so we need to cast the return value.
+ OldVal = IRB.CreateIntToPtr(C, OrigOldValTy);
+ }
- Value *Res = IRB.CreateInsertValue(UndefValue::get(CASI->getType()), C, 0);
+ Value *Res =
+ IRB.CreateInsertValue(UndefValue::get(CASI->getType()), OldVal, 0);
Res = IRB.CreateInsertValue(Res, Success, 1);
I->replaceAllUsesWith(Res);
; CHECK-LABEL: atomic64_load_seq_cst
; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 5), !dbg
+define i8* @atomic64_load_seq_cst_ptr_ty(i8** %a) nounwind uwtable {
+entry:
+  %0 = load atomic i8*, i8** %a seq_cst, align 8, !dbg !7
+  ret i8* %0, !dbg !7
+}
+; The label must name the new function: a duplicate of atomic64_load_seq_cst
+; is rejected by FileCheck (CHECK-LABELs must be unique) and would anchor the
+; checks to the wrong function body.
+; CHECK-LABEL: atomic64_load_seq_cst_ptr_ty
+; CHECK: bitcast i8** %{{.+}} to i64*
+; CHECK-NEXT: call i64 @__tsan_atomic64_load(i64* %{{.+}}, i32 5), !dbg
+; CHECK-NEXT: inttoptr i64 %{{.+}} to i8*
+
define void @atomic64_store_unordered(i64* %a) nounwind uwtable {
entry:
store atomic i64 0, i64* %a unordered, align 8, !dbg !7
; CHECK-LABEL: atomic64_store_seq_cst
; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 5), !dbg
+define void @atomic64_store_seq_cst_ptr_ty(i8** %a, i8* %v) nounwind uwtable {
+entry:
+  store atomic i8* %v, i8** %a seq_cst, align 8, !dbg !7
+  ret void, !dbg !7
+}
+; The label must name the new function: a duplicate of atomic64_store_seq_cst
+; is rejected by FileCheck (CHECK-LABELs must be unique) and would anchor the
+; checks to the wrong function body.
+; CHECK-LABEL: atomic64_store_seq_cst_ptr_ty
+; CHECK: %{{.*}} = bitcast i8** %{{.*}} to i64*
+; CHECK-NEXT: %{{.*}} = ptrtoint i8* %{{.*}} to i64
+; CHECK-NEXT: call void @__tsan_atomic64_store(i64* %{{.*}}, i64 %{{.*}}, i32 5), !dbg
+
define void @atomic64_xchg_monotonic(i64* %a) nounwind uwtable {
entry:
atomicrmw xchg i64* %a, i64 0 monotonic, !dbg !7
; CHECK-LABEL: atomic64_cas_seq_cst
; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 5, i32 5), !dbg
+define void @atomic64_cas_seq_cst_ptr_ty(i8** %a, i8* %v1, i8* %v2) nounwind uwtable {
+entry:
+  cmpxchg i8** %a, i8* %v1, i8* %v2 seq_cst seq_cst, !dbg !7
+  ret void
+}
+; The label must name the new function: a duplicate of atomic64_cas_seq_cst
+; is rejected by FileCheck (CHECK-LABELs must be unique) and would anchor the
+; checks to the wrong function body.
+; CHECK-LABEL: atomic64_cas_seq_cst_ptr_ty
+; CHECK: {{.*}} = ptrtoint i8* %v1 to i64
+; CHECK-NEXT: {{.*}} = ptrtoint i8* %v2 to i64
+; CHECK-NEXT: {{.*}} = bitcast i8** %a to i64*
+; CHECK-NEXT: {{.*}} = call i64 @__tsan_atomic64_compare_exchange_val(i64* {{.*}}, i64 {{.*}}, i64 {{.*}}, i32 5, i32 5), !dbg
+; CHECK-NEXT: {{.*}} = icmp eq i64
+; CHECK-NEXT: {{.*}} = inttoptr i64 {{.*}} to i8*
+; CHECK-NEXT: {{.*}} = insertvalue { i8*, i1 } undef, i8* {{.*}}, 0
+; CHECK-NEXT: {{.*}} = insertvalue { i8*, i1 } {{.*}}, i1 {{.*}}, 1
+
define i128 @atomic128_load_unordered(i128* %a) nounwind uwtable {
entry:
%0 = load atomic i128, i128* %a unordered, align 16, !dbg !7