From 72da47df2514bd54b0d9378280625591f9dd2f9e Mon Sep 17 00:00:00 2001
From: Mark Searles
Date: Mon, 16 Jul 2018 10:02:41 +0000
Subject: [PATCH] Run post-RA hazard recognizer pass late

Memory legalizer, waitcnt, and shrink passes can perturb the instructions,
which means that the post-RA hazard recognizer pass should run after them.
Otherwise, one of those passes may invalidate the work done by the hazard
recognizer. Note that this has the adverse side-effect that any consecutive
S_NOP 0's, emitted by the hazard recognizer, will not be shrunk into a
single S_NOP <n>. This should be addressed in a follow-on patch.

Differential Revision: https://reviews.llvm.org/D49288

llvm-svn: 337154
---
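Reviewer notes, kept below the fold (git am drops everything between the
"---" above and the first "diff --git" line, so the patch applies as-is):

The reordering in AMDGPUTargetMachine.cpp is easiest to read as the final
shape of the function. The following C++ is a sketch assembled from the
hunks below, not a verbatim copy of the file; every pass name in it does
appear in the diff:

    void GCNPassConfig::addPreEmitPass() {
      // Passes that can add, delete, or move instructions run first ...
      addPass(createSIMemoryLegalizerPass());
      addPass(createSIInsertWaitcntsPass());
      addPass(createSIShrinkInstructionsPass());

      // ... so the stand-alone hazard recognizer sees the final instruction
      // stream, and the S_NOPs it inserts can no longer be invalidated.
      addPass(&PostRAHazardRecognizerID);

      addPass(&SIInsertSkipsPassID);
      addPass(createSIDebuggerInsertNopsPass());
      addPass(&BranchRelaxationPassID);
    }

On the new FIXME: since "s_nop N" on GCN waits N+1 cycles, a run of K
back-to-back "s_nop 0" could be folded into a single "s_nop K-1". One
hypothetical shape for the follow-on is sketched below; the helper name
mergeTrivialNops is invented for illustration, the cap of 8 wait states is
a conservative assumption, and none of this is part of this patch:

    // Sketch only. Assumes LLVM's MachineIR APIs and the AMDGPU backend's
    // generated opcode enum (AMDGPU::S_NOP, whose sole immediate operand
    // encodes the number of extra wait states).
    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstr.h"
    #include <iterator>
    using namespace llvm;

    static void mergeTrivialNops(MachineBasicBlock &MBB) {
      for (auto I = MBB.begin(); I != MBB.end();) {
        if (I->getOpcode() != AMDGPU::S_NOP ||
            I->getOperand(0).getImm() != 0) {
          ++I;
          continue;
        }
        unsigned Run = 1; // counts the "s_nop 0" at I itself
        auto J = std::next(I);
        while (J != MBB.end() && Run < 8 &&
               J->getOpcode() == AMDGPU::S_NOP &&
               J->getOperand(0).getImm() == 0) {
          J = MBB.erase(J); // fold this nop into the leading one
          ++Run;
        }
        I->getOperand(0).setImm(Run - 1); // one s_nop covering Run cycles
        I = J;
      }
    }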
 llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp     | 10 ++++---
 llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp       |  2 +-
 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mov.dpp.ll    | 23 +++++++++++-----
 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll | 31 +++++++++++++++++++++-
 llvm/test/CodeGen/AMDGPU/memory_clause.ll          |  6 ++++-
 5 files changed, 59 insertions(+), 13 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 7dfe33b..2205819 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -886,6 +886,10 @@ void GCNPassConfig::addPreSched2() {
 }
 
 void GCNPassConfig::addPreEmitPass() {
+  addPass(createSIMemoryLegalizerPass());
+  addPass(createSIInsertWaitcntsPass());
+  addPass(createSIShrinkInstructionsPass());
+
   // The hazard recognizer that runs as part of the post-ra scheduler does not
   // guarantee to be able to handle all hazards correctly. This is because if there
   // are multiple scheduling regions in a basic block, the regions are scheduled
@@ -894,11 +898,11 @@ void GCNPassConfig::addPreEmitPass() {
   //
   // Here we add a stand-alone hazard recognizer pass which can handle all
   // cases.
+  //
+  // FIXME: This stand-alone pass will emit indiv. S_NOP 0, as needed. It would
+  // be better for it to emit S_NOP <n> when possible.
   addPass(&PostRAHazardRecognizerID);
 
-  addPass(createSIMemoryLegalizerPass());
-  addPass(createSIInsertWaitcntsPass());
-  addPass(createSIShrinkInstructionsPass());
   addPass(&SIInsertSkipsPassID);
   addPass(createSIDebuggerInsertNopsPass());
   addPass(&BranchRelaxationPassID);
diff --git a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
index 4e1200a..938cdaf1 100644
--- a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
@@ -737,7 +737,7 @@ bool SIGfx6CacheControl::insertWait(MachineBasicBlock::iterator &MI,
     case SIAtomicScope::WAVEFRONT:
     case SIAtomicScope::SINGLETHREAD:
       // The L1 cache keeps all memory operations in order for
-      // wavesfronts in the same work-group.
+      // wavefronts in the same work-group.
       break;
     default:
       llvm_unreachable("Unsupported synchronization scope");
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mov.dpp.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mov.dpp.ll
index 1ee8320..18ede50 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mov.dpp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mov.dpp.ll
@@ -6,7 +6,9 @@
 ; VI-LABEL: {{^}}dpp_test:
 ; VI: v_mov_b32_e32 v0, s{{[0-9]+}}
 ; VI-NOOPT: v_mov_b32_e32 v1, s{{[0-9]+}}
-; VI: s_nop 1
+; VI-OPT: s_nop 1
+; VI-NOOPT: s_nop 0
+; VI-NOOPT: s_nop 0
 ; VI-OPT: v_mov_b32_dpp v0, v0 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0x01,0x08,0x11]
 ; VI-NOOPT: v_mov_b32_dpp v0, v1 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0 ; encoding: [0xfa,0x02,0x00,0x7e,0x01,0x01,0x08,0x11]
 define amdgpu_kernel void @dpp_test(i32 addrspace(1)* %out, i32 %in) {
@@ -18,10 +20,14 @@ define amdgpu_kernel void @dpp_test(i32 addrspace(1)* %out, i32 %in) {
 ; VI-LABEL: {{^}}dpp_wait_states:
 ; VI-NOOPT: v_mov_b32_e32 [[VGPR1:v[0-9]+]], s{{[0-9]+}}
 ; VI: v_mov_b32_e32 [[VGPR0:v[0-9]+]], s{{[0-9]+}}
-; VI: s_nop 1
+; VI-OPT: s_nop 1
+; VI-NOOPT: s_nop 0
+; VI-NOOPT: s_nop 0
 ; VI-OPT: v_mov_b32_dpp [[VGPR0]], [[VGPR0]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
 ; VI-NOOPT: v_mov_b32_dpp [[VGPR1]], [[VGPR0]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
-; VI: s_nop 1
+; VI-OPT: s_nop 1
+; VI-NOOPT: s_nop 0
+; VI-NOOPT: s_nop 0
 ; VI-OPT: v_mov_b32_dpp v{{[0-9]+}}, [[VGPR0]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
 ; VI-NOOPT: v_mov_b32_dpp v{{[0-9]+}}, [[VGPR1]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
 define amdgpu_kernel void @dpp_wait_states(i32 addrspace(1)* %out, i32 %in) {
@@ -35,11 +41,14 @@ define amdgpu_kernel void @dpp_wait_states(i32 addrspace(1)* %out, i32 %in) {
 ; VI: ; %endif
 ; VI-OPT: s_mov_b32
 ; VI-OPT: s_mov_b32
-; VI-NOOPT: s_nop 1
+; VI-NOOPT: s_waitcnt
+; VI-NOOPT-NEXT: s_nop 0
 ; VI: v_mov_b32_dpp [[VGPR0:v[0-9]+]], v{{[0-9]+}} quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
-; VI: s_nop 1
+; VI-OPT: s_nop 1
 ; VI: v_mov_b32_dpp [[VGPR1:v[0-9]+]], [[VGPR0]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
-; VI: s_nop 1
+; VI-OPT: s_nop 1
+; VI-NOOPT: s_nop 0
+; VI-NOOPT: s_nop 0
 ; VI: v_mov_b32_dpp v{{[0-9]+}}, [[VGPR1]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
 define amdgpu_kernel void @dpp_first_in_bb(float addrspace(1)* %out, float addrspace(1)* %in, float %cond, float %a, float %b) {
   %cmp = fcmp oeq float %cond, 0.0
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll
index 4d1dbf5..d328b10 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll
@@ -4,7 +4,9 @@
 ; VI-LABEL: {{^}}dpp_test:
 ; VI: v_mov_b32_e32 v0, s{{[0-9]+}}
 ; VI: v_mov_b32_e32 v1, s{{[0-9]+}}
-; VI: s_nop 1
+; VI-OPT: s_nop 1
+; VI-NOOPT: s_nop 0
+; VI-NOOPT: s_nop 0
 ; VI: v_mov_b32_dpp v0, v1 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0 ; encoding: [0xfa,0x02,0x00,0x7e,0x01,0x01,0x08,0x11]
 define amdgpu_kernel void @dpp_test(i32 addrspace(1)* %out, i32 %in1, i32 %in2) {
   %tmp0 = call i32 @llvm.amdgcn.update.dpp.i32(i32 %in1, i32 %in2, i32 1, i32 1, i32 1, i1 1) #0
@@ -12,6 +14,33 @@ define amdgpu_kernel void @dpp_test(i32 addrspace(1)* %out, i32 %in1, i32 %in2)
   ret void
 }
 
+; VI-LABEL: {{^}}dpp_test1:
+; VI-OPT: v_add_u32_e32 [[REG:v[0-9]+]], vcc, v{{[0-9]+}}, v{{[0-9]+}}
+; VI-NOOPT: v_mov_b32_e32 v{{[0-9]+}}, 0
+; VI-NOOPT: v_mov_b32_e32 [[REG:v[0-9]+]], v{{[0-9]+}}
+; VI-NEXT: s_nop 0
+; VI-NEXT: s_nop 0
+; VI-NEXT: v_mov_b32_dpp v2, [[REG]] quad_perm:[1,0,3,2] row_mask:0xf bank_mask:0xf
+@0 = internal unnamed_addr addrspace(3) global [448 x i32] undef, align 4
+define weak_odr amdgpu_kernel void @dpp_test1(i32* %arg) local_unnamed_addr {
+bb:
+  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %tmp1 = zext i32 %tmp to i64
+  %tmp2 = getelementptr inbounds [448 x i32], [448 x i32] addrspace(3)* @0, i32 0, i32 %tmp
+  %tmp3 = load i32, i32 addrspace(3)* %tmp2, align 4
+  fence syncscope("workgroup") release
+  tail call void @llvm.amdgcn.s.barrier()
+  fence syncscope("workgroup") acquire
+  %tmp4 = add nsw i32 %tmp3, %tmp3
+  %tmp5 = tail call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %tmp4, i32 177, i32 15, i32 15, i1 zeroext false)
+  %tmp6 = add nsw i32 %tmp5, %tmp4
+  %tmp7 = getelementptr inbounds i32, i32* %arg, i64 %tmp1
+  store i32 %tmp6, i32* %tmp7, align 4
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x()
+declare void @llvm.amdgcn.s.barrier()
 declare i32 @llvm.amdgcn.update.dpp.i32(i32, i32, i32, i32, i32, i1) #0
 
 attributes #0 = { nounwind readnone convergent }
diff --git a/llvm/test/CodeGen/AMDGPU/memory_clause.ll b/llvm/test/CodeGen/AMDGPU/memory_clause.ll
index 516fc34..f4a66f8 100644
--- a/llvm/test/CodeGen/AMDGPU/memory_clause.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory_clause.ll
@@ -77,6 +77,7 @@ bb:
 ; GCN-NEXT: buffer_load_dword
 ; GCN-NEXT: buffer_load_dword
 ; GCN-NEXT: s_nop
+; GCN-NEXT: s_nop
 ; GCN-NEXT: buffer_load_dword
 define void @mubuf_clause(<4 x i32> addrspace(5)* noalias nocapture readonly %arg, <4 x i32> addrspace(5)* noalias nocapture %arg1) {
 bb:
@@ -105,8 +106,9 @@ bb:
 
 ; GCN-LABEL: {{^}}vector_clause_indirect:
 ; GCN: global_load_dwordx2 [[ADDR:v\[[0-9:]+\]]], v[{{[0-9:]+}}], off
-; GCN-NEXT: s_nop
+; GCN-NEXT: s_nop 0
 ; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_nop 0
 ; GCN-NEXT: global_load_dwordx4 v[{{[0-9:]+}}], [[ADDR]], off
 ; GCN-NEXT: global_load_dwordx4 v[{{[0-9:]+}}], [[ADDR]], off offset:16
 define amdgpu_kernel void @vector_clause_indirect(i64 addrspace(1)* noalias nocapture readonly %arg, <4 x i32> addrspace(1)* noalias nocapture readnone %arg1, <4 x i32> addrspace(1)* noalias nocapture %arg2) {
@@ -128,6 +130,7 @@ bb:
 ; GCN-LABEL: {{^}}load_global_d16_hi:
 ; GCN: global_load_short_d16_hi v
 ; GCN-NEXT: s_nop
+; GCN-NEXT: s_nop
 ; GCN-NEXT: global_load_short_d16_hi v
 define void @load_global_d16_hi(i16 addrspace(1)* %in, i16 %reg, <2 x i16> addrspace(1)* %out) {
 entry:
@@ -147,6 +150,7 @@ entry:
 ; GCN-LABEL: {{^}}load_global_d16_lo:
 ; GCN: global_load_short_d16 v
 ; GCN-NEXT: s_nop
+; GCN-NEXT: s_nop
 ; GCN-NEXT: global_load_short_d16 v
 define void @load_global_d16_lo(i16 addrspace(1)* %in, i32 %reg, <2 x i16> addrspace(1)* %out) {
 entry:
-- 
2.7.4