return true;
}
-static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E,
+static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
llvm::Value *Dest, llvm::Value *Ptr,
llvm::Value *Val1, llvm::Value *Val2,
uint64_t Size, unsigned Align,
llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
Ptr, Expected, Desired, SuccessOrder, FailureOrder);
Pair->setVolatile(E->isVolatile());
+ Pair->setWeak(IsWeak);
// Cmp holds the result of the compare-exchange operation: true on success,
// false on failure.
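For reference: a weak compare-exchange is permitted to fail spuriously even
when the comparison succeeds, so it may only stand in for the strong form
inside a retry loop. A minimal sketch using the __atomic builtins
(fetch_increment is an illustrative name, not part of this patch):

    #include <stdbool.h>

    /* Retry loop: a spurious failure just refreshes `expected` and goes
     * around again, so the weak form costs nothing here. */
    static int fetch_increment(int *p) {
      int expected = __atomic_load_n(p, __ATOMIC_RELAXED);
      while (!__atomic_compare_exchange_n(p, &expected, expected + 1,
                                          /*weak=*/true, __ATOMIC_SEQ_CST,
                                          __ATOMIC_RELAXED))
        ; /* on failure, `expected` holds the value just seen in *p */
      return expected;
    }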
/// instructions to cope with the provided (but possibly only dynamically known)
/// FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
- llvm::Value *Dest, llvm::Value *Ptr,
- llvm::Value *Val1, llvm::Value *Val2,
+ bool IsWeak, llvm::Value *Dest,
+ llvm::Value *Ptr, llvm::Value *Val1,
+ llvm::Value *Val2,
llvm::Value *FailureOrderVal,
uint64_t Size, unsigned Align,
llvm::AtomicOrdering SuccessOrder) {
FailureOrder =
llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
}
- emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2, Size, Align, SuccessOrder,
- FailureOrder);
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align,
+ SuccessOrder, FailureOrder);
return;
}
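When both orderings fold to constants, the early return above clamps an
invalid failure ordering (one stronger than the success ordering, or a
release flavour, both undefined in C11) via getStrongestFailureOrdering
rather than asserting. A sketch of that mapping, assuming LLVM's rules at
the time (strongest_failure is an illustrative C mirror, not the real API):

    /* Strongest failure ordering a cmpxchg may carry for a given success
     * ordering: failure may be neither release-flavoured nor stronger. */
    enum ord { MONOTONIC, ACQUIRE, RELEASE, ACQ_REL, SEQ_CST };

    static enum ord strongest_failure(enum ord success) {
      switch (success) {
      case SEQ_CST:  return SEQ_CST;
      case ACQ_REL:                    /* failure may not be acq_rel */
      case ACQUIRE:  return ACQUIRE;
      case RELEASE:                    /* failure may not be release */
      default:       return MONOTONIC;
      }
    }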
// doesn't matter unless someone is crazy enough to use something that
// doesn't fold to a constant for the ordering.
CGF.Builder.SetInsertPoint(MonotonicBB);
- emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2,
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
Size, Align, SuccessOrder, llvm::Monotonic);
CGF.Builder.CreateBr(ContBB);
if (AcquireBB) {
CGF.Builder.SetInsertPoint(AcquireBB);
- emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2,
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
Size, Align, SuccessOrder, llvm::Acquire);
CGF.Builder.CreateBr(ContBB);
SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
}
if (SeqCstBB) {
CGF.Builder.SetInsertPoint(SeqCstBB);
- emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2,
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
CGF.Builder.CreateBr(ContBB);
SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
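A failure ordering only known at run time takes the block-per-ordering path
above: a switch over the valid failure orderings with one cmpxchg per arm.
An illustrative caller that forces this path (cas_dynamic_failure is a
made-up name; the generalFailureOrder test below checks the same shape):

    #include <stdatomic.h>

    /* `fail` is not a compile-time constant, so clang branches on it. */
    _Bool cas_dynamic_failure(_Atomic(int) *p, int *expected, int desired,
                              memory_order fail) {
      return __c11_atomic_compare_exchange_strong(p, expected, desired,
                                                  memory_order_seq_cst, fail);
    }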
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
- llvm::Value *FailureOrder, uint64_t Size,
- unsigned Align, llvm::AtomicOrdering Order) {
+ llvm::Value *IsWeak, llvm::Value *FailureOrder,
+ uint64_t Size, unsigned Align,
+ llvm::AtomicOrdering Order) {
llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
llvm_unreachable("Already handled!");
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
+ FailureOrder, Size, Align, Order);
+ return;
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
+ FailureOrder, Size, Align, Order);
+ return;
case AtomicExpr::AO__atomic_compare_exchange:
- case AtomicExpr::AO__atomic_compare_exchange_n:
- emitAtomicCmpXchgFailureSet(CGF, E, Dest, Ptr, Val1, Val2, FailureOrder,
- Size, Align, Order);
+ case AtomicExpr::AO__atomic_compare_exchange_n: {
+ if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
+ emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
+ Val1, Val2, FailureOrder, Size, Align, Order);
+ } else {
+      // Create all the relevant basic blocks.
+      llvm::BasicBlock *StrongBB =
+          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
+      llvm::BasicBlock *WeakBB =
+          CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
+ llvm::BasicBlock *ContBB =
+ CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
+
+ llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
+ SI->addCase(CGF.Builder.getInt1(false), StrongBB);
+
+ CGF.Builder.SetInsertPoint(StrongBB);
+ emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
+ FailureOrder, Size, Align, Order);
+ CGF.Builder.CreateBr(ContBB);
+
+ CGF.Builder.SetInsertPoint(WeakBB);
+ emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
+ FailureOrder, Size, Align, Order);
+ CGF.Builder.CreateBr(ContBB);
+
+ CGF.Builder.SetInsertPoint(ContBB);
+ }
return;
+ }
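Weakness selects between two different instructions rather than an operand
value, so a run-time weak flag cannot be folded into a single cmpxchg; the
block above dispatches to separate strong and weak versions instead. An
illustrative caller that reaches the non-constant path (the generalWeakness
test below pins down the resulting control flow):

    /* `weak` is a run-time value: codegen switches on it, emitting a plain
     * cmpxchg on the false arm and a cmpxchg weak on the default arm. */
    _Bool cas_runtime_weak(int *p, int *expected, int desired, _Bool weak) {
      return __atomic_compare_exchange_n(p, expected, desired, weak,
                                         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    }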
case AtomicExpr::AO__c11_atomic_load:
case AtomicExpr::AO__atomic_load_n:
case AtomicExpr::AO__atomic_load: {
bool UseLibcall = (Size != Align ||
getContext().toBits(sizeChars) > MaxInlineWidthInBits);
- llvm::Value *OrderFail = nullptr, *Val1 = nullptr, *Val2 = nullptr;
+ llvm::Value *IsWeak = nullptr, *OrderFail = nullptr, *Val1 = nullptr,
+ *Val2 = nullptr;
llvm::Value *Ptr = EmitScalarExpr(E->getPtr());
if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
else
Val2 = EmitValToTemp(*this, E->getVal2());
OrderFail = EmitScalarExpr(E->getOrderFail());
- // Evaluate and discard the 'weak' argument.
if (E->getNumSubExprs() == 6)
- EmitScalarExpr(E->getWeak());
+ IsWeak = EmitScalarExpr(E->getWeak());
break;
case AtomicExpr::AO__c11_atomic_fetch_add:
int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
switch (ord) {
case AtomicExpr::AO_ABI_memory_order_relaxed:
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
Size, Align, llvm::Monotonic);
break;
case AtomicExpr::AO_ABI_memory_order_consume:
case AtomicExpr::AO_ABI_memory_order_acquire:
if (IsStore)
break; // Avoid crashing on code with undefined behavior
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
Size, Align, llvm::Acquire);
break;
case AtomicExpr::AO_ABI_memory_order_release:
if (IsLoad)
break; // Avoid crashing on code with undefined behavior
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
Size, Align, llvm::Release);
break;
case AtomicExpr::AO_ABI_memory_order_acq_rel:
if (IsLoad || IsStore)
break; // Avoid crashing on code with undefined behavior
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
Size, Align, llvm::AcquireRelease);
break;
case AtomicExpr::AO_ABI_memory_order_seq_cst:
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
Size, Align, llvm::SequentiallyConsistent);
break;
default: // invalid order
// Emit all the different atomics
Builder.SetInsertPoint(MonotonicBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
Size, Align, llvm::Monotonic);
Builder.CreateBr(ContBB);
if (!IsStore) {
Builder.SetInsertPoint(AcquireBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
Size, Align, llvm::Acquire);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
}
if (!IsLoad) {
Builder.SetInsertPoint(ReleaseBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
Size, Align, llvm::Release);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
}
if (!IsLoad && !IsStore) {
Builder.SetInsertPoint(AcqRelBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
Size, Align, llvm::AcquireRelease);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
AcqRelBB);
}
Builder.SetInsertPoint(SeqCstBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
Size, Align, llvm::SequentiallyConsistent);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
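In this run-time-ordering path the monotonic block also serves as the
switch default, so an out-of-range order value degrades to a relaxed
operation rather than crashing. An illustrative call that lands here
(load_dynamic_order is a made-up name):

    #include <stdatomic.h>

    /* `order` arrives at run time; values outside the memory_order range
     * fall through to the monotonic arm of the emitted switch. */
    int load_dynamic_order(_Atomic(int) *p, memory_order order) {
      return __c11_atomic_load(p, order);
    }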
_Bool fi4b(int *i) {
// CHECK-LABEL: @fi4b
- // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
+ // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg weak i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
// CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
// CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
// CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
// CHECK: cmpxchg i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} acquire monotonic
__c11_atomic_compare_exchange_weak(ptr, ptr2, 43, memory_order_seq_cst, memory_order_acquire);
- // CHECK: cmpxchg i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} seq_cst acquire
+ // CHECK: cmpxchg weak i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} seq_cst acquire
// Unknown ordering: conservatively pick strongest valid option (for now!).
__atomic_compare_exchange(ptr2, ptr2, ptr2, 0, memory_order_acq_rel, *ptr2);
// Undefined behaviour: don't really care what that last ordering is so leave
// it out:
__atomic_compare_exchange_n(ptr2, ptr2, 43, 1, memory_order_seq_cst, 42);
- // CHECK: cmpxchg i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} seq_cst
+ // CHECK: cmpxchg weak i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} seq_cst
}
// CHECK-LABEL: @generalFailureOrder
// CHECK: br
}
+void generalWeakness(int *ptr, int *ptr2, _Bool weak) {
+ __atomic_compare_exchange_n(ptr, ptr2, 42, weak, memory_order_seq_cst, memory_order_seq_cst);
+ // CHECK: switch i1 {{.*}}, label %[[WEAK:[0-9a-zA-Z._]+]] [
+ // CHECK-NEXT: i1 false, label %[[STRONG:[0-9a-zA-Z._]+]]
+
+ // CHECK: [[STRONG]]:
+ // CHECK-NOT: br
+ // CHECK: cmpxchg {{.*}} seq_cst seq_cst
+ // CHECK: br
+
+ // CHECK: [[WEAK]]:
+ // CHECK-NOT: br
+ // CHECK: cmpxchg weak {{.*}} seq_cst seq_cst
+ // CHECK: br
+}
+
+// Having checked the flow in the previous two cases, we'll trust clang to
+// combine them sanely.
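+// (Each of the 5 success orderings expands to its valid failure orderings,
+// 1+2+1+2+3 = 9 strong cmpxchgs, each with a weak twin: 18 in all.)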
+void EMIT_ALL_THE_THINGS(int *ptr, int *ptr2, int new, _Bool weak, int success, int fail) {
+ __atomic_compare_exchange(ptr, ptr2, &new, weak, success, fail);
+
+ // CHECK: = cmpxchg {{.*}} monotonic monotonic
+ // CHECK: = cmpxchg weak {{.*}} monotonic monotonic
+ // CHECK: = cmpxchg {{.*}} acquire monotonic
+ // CHECK: = cmpxchg {{.*}} acquire acquire
+ // CHECK: = cmpxchg weak {{.*}} acquire monotonic
+ // CHECK: = cmpxchg weak {{.*}} acquire acquire
+ // CHECK: = cmpxchg {{.*}} release monotonic
+ // CHECK: = cmpxchg weak {{.*}} release monotonic
+ // CHECK: = cmpxchg {{.*}} acq_rel monotonic
+ // CHECK: = cmpxchg {{.*}} acq_rel acquire
+ // CHECK: = cmpxchg weak {{.*}} acq_rel monotonic
+ // CHECK: = cmpxchg weak {{.*}} acq_rel acquire
+ // CHECK: = cmpxchg {{.*}} seq_cst monotonic
+ // CHECK: = cmpxchg {{.*}} seq_cst acquire
+ // CHECK: = cmpxchg {{.*}} seq_cst seq_cst
+ // CHECK: = cmpxchg weak {{.*}} seq_cst monotonic
+ // CHECK: = cmpxchg weak {{.*}} seq_cst acquire
+ // CHECK: = cmpxchg weak {{.*}} seq_cst seq_cst
+}
+
#endif