From 04051b5fad96e340d6de5a028356530f881b2bcc Mon Sep 17 00:00:00 2001
From: Tom Stellard
Date: Thu, 27 Oct 2016 23:42:29 +0000
Subject: [PATCH] AMDGPU/SI: Handle hazard with sgpr lane selects for
 v_{read,write}lane

Reviewers: arsenm

Subscribers: kzhuravl, wdng, nhaehnle, yaxunl, tony-tye, llvm-commits

Differential Revision: https://reviews.llvm.org/D25637

llvm-svn: 285367
---
 llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp     | 34 ++++++++++-
 llvm/lib/Target/AMDGPU/GCNHazardRecognizer.h       |  1 +
 .../CodeGen/MIR/AMDGPU/inserted-wait-states.mir    | 66 ++++++++++++++++++++++
 3 files changed, 100 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
index 687076a..1df397b 100644
--- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
@@ -50,7 +50,11 @@ static bool isSSetReg(unsigned Opcode) {
   return Opcode == AMDGPU::S_SETREG_B32 || Opcode == AMDGPU::S_SETREG_IMM32_B32;
 }
 
-static unsigned getHWReg(const SIInstrInfo *TII, const MachineInstr &RegInstr) {
+static bool isRWLane(unsigned Opcode) {
+  return Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32;
+}
+
+static unsigned getHWReg(const SIInstrInfo *TII, const MachineInstr &RegInstr) {
 
   const MachineOperand *RegOp = TII->getNamedOperand(RegInstr,
                                                      AMDGPU::OpName::simm16);
@@ -76,6 +80,9 @@ GCNHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
   if (isDivFMas(MI->getOpcode()) && checkDivFMasHazards(MI) > 0)
     return NoopHazard;
 
+  if (isRWLane(MI->getOpcode()) && checkRWLaneHazards(MI) > 0)
+    return NoopHazard;
+
   if (isSGetReg(MI->getOpcode()) && checkGetRegHazards(MI) > 0)
     return NoopHazard;
 
@@ -105,6 +112,9 @@ unsigned GCNHazardRecognizer::PreEmitNoops(MachineInstr *MI) {
   if (isDivFMas(MI->getOpcode()))
     WaitStates = std::max(WaitStates, checkDivFMasHazards(MI));
 
+  if (isRWLane(MI->getOpcode()))
+    WaitStates = std::max(WaitStates, checkRWLaneHazards(MI));
+
   return WaitStates;
 }
 
@@ -438,3 +448,25 @@ int GCNHazardRecognizer::checkVALUHazards(MachineInstr *VALU) {
   }
   return WaitStatesNeeded;
 }
+
+int GCNHazardRecognizer::checkRWLaneHazards(MachineInstr *RWLane) {
+  const SIInstrInfo *TII = ST.getInstrInfo();
+  const SIRegisterInfo *TRI = ST.getRegisterInfo();
+  const MachineRegisterInfo &MRI =
+      RWLane->getParent()->getParent()->getRegInfo();
+
+  const MachineOperand *LaneSelectOp =
+      TII->getNamedOperand(*RWLane, AMDGPU::OpName::src1);
+
+  if (!LaneSelectOp->isReg() || !TRI->isSGPRReg(MRI, LaneSelectOp->getReg()))
+    return 0;
+
+  unsigned LaneSelectReg = LaneSelectOp->getReg();
+  auto IsHazardFn = [TII] (MachineInstr *MI) {
+    return TII->isVALU(*MI);
+  };
+
+  const int RWLaneWaitStates = 4;
+  int WaitStatesSince = getWaitStatesSinceDef(LaneSelectReg, IsHazardFn);
+  return RWLaneWaitStates - WaitStatesSince;
+}
diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.h b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.h
index f0882d0..8b9ea6a 100644
--- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.h
+++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.h
@@ -50,6 +50,7 @@ class GCNHazardRecognizer final : public ScheduleHazardRecognizer {
   int checkSetRegHazards(MachineInstr *SetRegInstr);
   int createsVALUHazard(const MachineInstr &MI);
   int checkVALUHazards(MachineInstr *VALU);
+  int checkRWLaneHazards(MachineInstr *RWLane);
 public:
   GCNHazardRecognizer(const MachineFunction &MF);
   // We can only issue one instruction per cycle.
diff --git a/llvm/test/CodeGen/MIR/AMDGPU/inserted-wait-states.mir b/llvm/test/CodeGen/MIR/AMDGPU/inserted-wait-states.mir
index 8936e6f..4d466d4 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/inserted-wait-states.mir
+++ b/llvm/test/CodeGen/MIR/AMDGPU/inserted-wait-states.mir
@@ -7,6 +7,7 @@
   define void @s_getreg() { ret void }
   define void @s_setreg() { ret void }
   define void @vmem_gt_8dw_store() { ret void }
+  define void @readwrite_lane() { ret void }
 ...
 ---
 # GCN-LABEL: name: div_fmas
@@ -234,3 +235,68 @@ body: |
     S_ENDPGM
 
 ...
+
+...
+---
+
+# GCN-LABEL: name: readwrite_lane
+
+# GCN-LABEL: bb.0:
+# GCN: V_ADD_I32
+# GCN: S_NOP
+# GCN: S_NOP
+# GCN: S_NOP
+# GCN: S_NOP
+# GCN: V_READLANE_B32
+
+# GCN-LABEL: bb.1:
+# GCN: V_ADD_I32
+# GCN: S_NOP
+# GCN: S_NOP
+# GCN: S_NOP
+# GCN: S_NOP
+# GCN: V_WRITELANE_B32
+
+# GCN-LABEL: bb.2:
+# GCN: V_ADD_I32
+# GCN: S_NOP
+# GCN: S_NOP
+# GCN: S_NOP
+# GCN: S_NOP
+# GCN: V_READLANE_B32
+
+# GCN-LABEL: bb.3:
+# GCN: V_ADD_I32
+# GCN: S_NOP
+# GCN: S_NOP
+# GCN: S_NOP
+# GCN: S_NOP
+# GCN: V_WRITELANE_B32
+
+name: readwrite_lane
+
+body: |
+  bb.0:
+    successors: %bb.1
+    %vgpr0,%sgpr0_sgpr1 = V_ADD_I32_e64 %vgpr1, %vgpr2, implicit %vcc, implicit %exec
+    %sgpr4 = V_READLANE_B32 %vgpr4, %sgpr0
+    S_BRANCH %bb.1
+
+  bb.1:
+    successors: %bb.2
+    %vgpr0,%sgpr0_sgpr1 = V_ADD_I32_e64 %vgpr1, %vgpr2, implicit %vcc, implicit %exec
+    %vgpr4 = V_WRITELANE_B32 %sgpr0, %sgpr0
+    S_BRANCH %bb.2
+
+  bb.2:
+    successors: %bb.3
+    %vgpr0,implicit %vcc = V_ADD_I32_e32 %vgpr1, %vgpr2, implicit %vcc, implicit %exec
+    %sgpr4 = V_READLANE_B32 %vgpr4, %vcc_lo
+    S_BRANCH %bb.3
+
+  bb.3:
+    %vgpr0,implicit %vcc = V_ADD_I32_e32 %vgpr1, %vgpr2, implicit %vcc, implicit %exec
+    %vgpr4 = V_WRITELANE_B32 %sgpr4, %vcc_lo
+    S_ENDPGM
+
+...
-- 
2.7.4