if (const LoadInst *Load = dyn_cast<LoadInst>(V))
return Load->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;
+ // Atomics are divergent because they are executed sequentially: when an
+ // atomic operation refers to the same address in each thread, each thread
+ // after the first sees the value written by the previous thread as its
+ // original value.
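+ // For example, if every lane performs an atomicrmw add of 1 on the same
+ // counter, the serialized updates give each lane a distinct previous value,
+ // so the result cannot be treated as uniform.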
+ if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
+ return true;
+
if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
const TargetMachine &TM = getTLI()->getTargetMachine();
return isIntrinsicSourceOfDivergence(TM.getIntrinsicInfo(), Intrinsic);
--- /dev/null
+; RUN: opt -mtriple=amdgcn-- -analyze -divergence %s | FileCheck %s
+
+; CHECK: DIVERGENT: %orig = atomicrmw xchg i32* %ptr, i32 %val seq_cst
+define i32 @test1(i32* %ptr, i32 %val) #0 {
+ %orig = atomicrmw xchg i32* %ptr, i32 %val seq_cst
+ ret i32 %orig
+}
+
+; CHECK: DIVERGENT: %orig = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
+define {i32, i1} @test2(i32* %ptr, i32 %cmp, i32 %new) {
+ %orig = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
+ ret {i32, i1} %orig
+}
+
+attributes #0 = { "ShaderType"="0" }
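
As intuition for the change, a minimal CUDA sketch of the behavior the comment
describes (the kernel name, launch shape, and buffer sizes are illustrative
assumptions, not part of this patch): every thread updates the same address
atomically, the hardware serializes the updates, and each thread reads back a
different original value, so the atomic's result is divergent across threads.

// Illustrative only: shows why the result of an atomic differs per thread.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void divergent_atomics(int *counter, int *orig) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  // atomicAdd returns the value of *counter before this thread's update; the
  // updates are serialized, so every thread observes a distinct prior value.
  orig[tid] = atomicAdd(counter, 1);
}

int main() {
  int *counter, *orig;
  cudaMallocManaged(&counter, sizeof(int));
  cudaMallocManaged(&orig, 64 * sizeof(int));
  *counter = 0;
  divergent_atomics<<<1, 64>>>(counter, orig);
  cudaDeviceSynchronize();
  // Each entry of orig holds a different value in [0, 64): the "original
  // value" seen by each thread is not uniform, matching what the divergence
  // analysis must assume about atomic results.
  for (int i = 0; i < 4; ++i)
    printf("thread %d saw original value %d\n", i, orig[i]);
  cudaFree(orig);
  cudaFree(counter);
  return 0;
}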