const DataLayout *DL;
DominatorTree *DT;
bool HasDPP;
+ bool IsPixelShader;
void optimizeAtomic(Instruction &I, Instruction::BinaryOps Op,
unsigned ValIdx, bool ValDivergent) const;
const TargetMachine &TM = TPC.getTM<TargetMachine>();
const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
HasDPP = ST.hasDPP();
+ IsPixelShader = F.getCallingConv() == CallingConv::AMDGPU_PS;
visit(F);
// Start building just before the instruction.
IRBuilder<> B(&I);
+ // If we are in a pixel shader, we have to mask out helper lane invocations,
+ // so we need to record the entry and exit BBs of the masked region.
+ BasicBlock *PixelEntryBB = nullptr;
+ BasicBlock *PixelExitBB = nullptr;
+
+ // If we're optimizing an atomic within a pixel shader, we need to wrap the
+ // entire atomic operation in a helper-lane check. We do not want any helper
+ // lanes that are around only for the purposes of derivatives to take part
+ // in any cross-lane communication, and we use a branch on whether the lane is
+ // live to do this.
+ if (IsPixelShader) {
+ // Record I's original position as the entry block.
+ PixelEntryBB = I.getParent();
+
+ Value *const Cond = B.CreateIntrinsic(Intrinsic::amdgcn_ps_live, {}, {});
+ Instruction *const NonHelperTerminator =
+ SplitBlockAndInsertIfThen(Cond, &I, false, nullptr, DT, nullptr);
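+
+ // SplitBlockAndInsertIfThen splits the block at I, inserts a conditional
+ // branch on Cond to a new "then" block, and returns that block's
+ // terminator; passing DT keeps the dominator tree up to date.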
+
+ // Record I's new position as the exit block.
+ PixelExitBB = I.getParent();
+
+ I.moveBefore(NonHelperTerminator);
+ B.SetInsertPoint(&I);
+ }
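+
+ // The CFG now has the shape sketched below (block names are illustrative):
+ //
+ //   PixelEntryBB:  %live = call i1 @llvm.amdgcn.ps.live()
+ //                  br i1 %live, label %NonHelperBB, label %PixelExitBB
+ //   NonHelperBB:   I and the cross-lane code built below
+ //                  br label %PixelExitBB
+ //   PixelExitBB:   reconvergence point; a PHI is added here further down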
+
Type *const Ty = I.getType();
const unsigned TyBitWidth = DL->getTypeSizeInBits(Ty);
Type *const VecTy = VectorType::get(B.getInt32Ty(), 2);
// Combine the value broadcast from the first lane with this lane's offset,
// to get our lane's index into the atomic result.
Value *const Result = B.CreateBinOp(Op, BroadcastI, LaneOffset);
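+ // Each lane's Result is what its own atomic would have returned: the value
+ // read back by the first active lane, combined (via Op) with the lane's
+ // scanned offset.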
- // Replace the original atomic instruction with the new one.
- I.replaceAllUsesWith(Result);
+ if (IsPixelShader) {
+ // Need a final PHI to reconverge above the helper-lane branch.
+ B.SetInsertPoint(PixelExitBB->getFirstNonPHI());
+
+ PHINode *const PHI = B.CreatePHI(Ty, 2);
+ PHI->addIncoming(UndefValue::get(Ty), PixelEntryBB);
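+ // Helper lanes branched around the atomic, so along the edge from
+ // PixelEntryBB there is no meaningful value; undef reflects that they
+ // took no part in the operation.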
+ PHI->addIncoming(Result, I.getParent());
+ I.replaceAllUsesWith(PHI);
+ } else {
+ // Replace the original atomic instruction with the new one.
+ I.replaceAllUsesWith(Result);
+ }
// And delete the original.
I.eraseFromParent();
--- /dev/null
+; RUN: llc -mtriple=amdgcn-- -amdgpu-atomic-optimizations=true -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX7LESS %s
+; RUN: llc -mtriple=amdgcn-- -mcpu=tonga -mattr=-flat-for-global -amdgpu-atomic-optimizations=true -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX8MORE %s
+; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 -mattr=-flat-for-global -amdgpu-atomic-optimizations=true -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX8MORE %s
+
+declare i1 @llvm.amdgcn.wqm.vote(i1)
+declare i32 @llvm.amdgcn.buffer.atomic.add(i32, <4 x i32>, i32, i32, i1)
+declare void @llvm.amdgcn.buffer.store.f32(float, <4 x i32>, i32, i32, i1, i1)
+
+; Show what the atomic optimization pass will do for raw buffers.
+
+; GCN-LABEL: add_i32_constant:
+; GCN-LABEL: BB0_1:
+; GCN: s_mov_b64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, exec
+; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt_lo:[0-9]+]], s[[exec_lo]], 0
+; GCN: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt_hi:[0-9]+]], s[[exec_hi]], v[[mbcnt_lo]]
+; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc, 0, v[[mbcnt_hi]]
+; GCN: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
+; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5
+; GCN: buffer_atomic_add v[[value]]
+; GCN: v_readfirstlane_b32 s{{[0-9]+}}, v[[value]]
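+;
+; That is: each lane computes its index with mbcnt, only the first active
+; lane issues the buffer atomic (adding 5 * <number of active lanes> in one
+; go), and the old value is broadcast back to every lane with readfirstlane.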
+define amdgpu_ps void @add_i32_constant(<4 x i32> inreg %out, <4 x i32> inreg %inout) {
+entry:
+ %cond1 = call i1 @llvm.amdgcn.wqm.vote(i1 true)
+ %old = call i32 @llvm.amdgcn.buffer.atomic.add(i32 5, <4 x i32> %inout, i32 0, i32 0, i1 0)
+ %cond2 = call i1 @llvm.amdgcn.wqm.vote(i1 true)
+ %cond = and i1 %cond1, %cond2
+ br i1 %cond, label %if, label %else
+if:
+ %bitcast = bitcast i32 %old to float
+ call void @llvm.amdgcn.buffer.store.f32(float %bitcast, <4 x i32> %out, i32 0, i32 0, i1 0, i1 0)
+ ret void
+else:
+ ret void
+}
+
+; GCN-LABEL: add_i32_varying:
+; GFX7LESS-NOT: v_mbcnt_lo_u32_b32
+; GFX7LESS-NOT: v_mbcnt_hi_u32_b32
+; GFX8MORE: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt_lo:[0-9]+]], exec_lo, 0
+; GFX8MORE: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt_hi:[0-9]+]], exec_hi, v[[mbcnt_lo]]
+; GFX8MORE: v_readlane_b32 s[[scalar_value:[0-9]+]], v{{[0-9]+}}, 63
+; GFX8MORE: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc, 0, v[[mbcnt_hi]]
+; GFX8MORE: v_mov_b32{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[scalar_value]]
+; GFX8MORE: buffer_atomic_add v[[value]]
+; GFX8MORE: v_readfirstlane_b32 s{{[0-9]+}}, v[[value]]
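+;
+; A divergent value needs DPP: on GFX7 and below the pass leaves the atomic
+; untouched (hence the -NOT checks), while on GFX8 and above it builds a DPP
+; scan, reads the wavefront total from lane 63, and issues a single atomic.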
+define amdgpu_ps void @add_i32_varying(<4 x i32> inreg %out, <4 x i32> inreg %inout, i32 %val) {
+entry:
+ %cond1 = call i1 @llvm.amdgcn.wqm.vote(i1 true)
+ %old = call i32 @llvm.amdgcn.buffer.atomic.add(i32 %val, <4 x i32> %inout, i32 0, i32 0, i1 0)
+ %cond2 = call i1 @llvm.amdgcn.wqm.vote(i1 true)
+ %cond = and i1 %cond1, %cond2
+ br i1 %cond, label %if, label %else
+if:
+ %bitcast = bitcast i32 %old to float
+ call void @llvm.amdgcn.buffer.store.f32(float %bitcast, <4 x i32> %out, i32 0, i32 0, i1 0, i1 0)
+ ret void
+else:
+ ret void
+}