From f9a42ed0a7f67979cdd20391366e2a059c2e14c8 Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Fri, 18 Oct 2019 18:26:37 +0000 Subject: [PATCH] AMDGPU: Relax 32-bit SGPR register class Mostly use SReg_32 instead of SReg_32_XM0 for arbitrary values. This will allow the register coalescer to do a better job eliminating copies to m0. For GlobalISel, as a terrible hack, use SGPR_32 for things that should use SCC until booleans are solved. llvm-svn: 375267 --- llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp | 2 +- .../Target/AMDGPU/AMDGPUInstructionSelector.cpp | 17 +- llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 32 +-- llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 8 +- llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp | 12 +- llvm/lib/Target/AMDGPU/SIRegisterInfo.h | 2 +- .../CodeGen/AMDGPU/GlobalISel/inst-select-add.mir | 4 +- .../AMDGPU/GlobalISel/inst-select-amdgcn.class.mir | 24 +- .../GlobalISel/inst-select-amdgcn.class.s16.mir | 14 +- .../AMDGPU/GlobalISel/inst-select-amdgcn.cos.mir | 2 +- .../GlobalISel/inst-select-amdgcn.cos.s16.mir | 2 +- .../GlobalISel/inst-select-amdgcn.cvt.pk.i16.mir | 4 +- .../GlobalISel/inst-select-amdgcn.cvt.pk.u16.mir | 4 +- .../inst-select-amdgcn.cvt.pknorm.i16.mir | 4 +- .../inst-select-amdgcn.cvt.pknorm.u16.mir | 4 +- .../GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir | 4 +- .../AMDGPU/GlobalISel/inst-select-amdgcn.fmed3.mir | 14 +- .../GlobalISel/inst-select-amdgcn.fmed3.s16.mir | 2 +- .../AMDGPU/GlobalISel/inst-select-amdgcn.fract.mir | 2 +- .../GlobalISel/inst-select-amdgcn.fract.s16.mir | 2 +- .../AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.mir | 6 +- .../GlobalISel/inst-select-amdgcn.ldexp.s16.mir | 4 +- .../GlobalISel/inst-select-amdgcn.mbcnt.lo.mir | 4 +- .../GlobalISel/inst-select-amdgcn.rcp.legacy.mir | 2 +- .../AMDGPU/GlobalISel/inst-select-amdgcn.rcp.mir | 2 +- .../GlobalISel/inst-select-amdgcn.rcp.s16.mir | 2 +- .../GlobalISel/inst-select-amdgcn.rsq.clamp.mir | 2 +- .../GlobalISel/inst-select-amdgcn.rsq.legacy.mir | 2 +- .../AMDGPU/GlobalISel/inst-select-amdgcn.rsq.mir | 2 +- .../GlobalISel/inst-select-amdgcn.rsq.s16.mir | 2 +- .../GlobalISel/inst-select-amdgcn.s.sendmsg.mir | 2 +- .../AMDGPU/GlobalISel/inst-select-amdgcn.sffbh.mir | 2 +- .../AMDGPU/GlobalISel/inst-select-amdgcn.sin.mir | 2 +- .../GlobalISel/inst-select-amdgcn.sin.s16.mir | 2 +- .../GlobalISel/inst-select-amdgpu-ffbh-u32.mir | 2 +- .../CodeGen/AMDGPU/GlobalISel/inst-select-and.mir | 66 +++--- .../AMDGPU/GlobalISel/inst-select-anyext.mir | 12 +- .../CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir | 30 +-- .../AMDGPU/GlobalISel/inst-select-ashr.s16.mir | 12 +- .../AMDGPU/GlobalISel/inst-select-bitreverse.mir | 2 +- .../AMDGPU/GlobalISel/inst-select-brcond.mir | 8 +- .../AMDGPU/GlobalISel/inst-select-build-vector.mir | 16 +- .../GlobalISel/inst-select-concat-vectors.mir | 32 +-- .../AMDGPU/GlobalISel/inst-select-constant.mir | 8 +- .../CodeGen/AMDGPU/GlobalISel/inst-select-copy.mir | 26 +-- .../AMDGPU/GlobalISel/inst-select-ctpop.mir | 8 +- .../AMDGPU/GlobalISel/inst-select-extract.mir | 96 ++++---- .../CodeGen/AMDGPU/GlobalISel/inst-select-fabs.mir | 8 +- .../CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.mir | 56 ++--- .../AMDGPU/GlobalISel/inst-select-fcmp.s16.mir | 28 +-- .../AMDGPU/GlobalISel/inst-select-ffloor.mir | 4 +- .../AMDGPU/GlobalISel/inst-select-ffloor.s16.mir | 6 +- .../AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.mir | 4 +- .../AMDGPU/GlobalISel/inst-select-fmaxnum.mir | 4 +- .../AMDGPU/GlobalISel/inst-select-fminnum-ieee.mir | 4 +- .../AMDGPU/GlobalISel/inst-select-fminnum.mir | 4 +- 
.../CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir | 4 +- .../CodeGen/AMDGPU/GlobalISel/inst-select-fneg.mir | 20 +- .../AMDGPU/GlobalISel/inst-select-fptosi.mir | 4 +- .../AMDGPU/GlobalISel/inst-select-fptoui.mir | 4 +- .../AMDGPU/GlobalISel/inst-select-frame-index.mir | 2 +- .../CodeGen/AMDGPU/GlobalISel/inst-select-gep.mir | 10 +- .../CodeGen/AMDGPU/GlobalISel/inst-select-icmp.mir | 10 +- .../AMDGPU/GlobalISel/inst-select-implicit-def.mir | 6 +- .../AMDGPU/GlobalISel/inst-select-insert.mir | 8 +- .../GlobalISel/inst-select-load-constant.mir | 16 +- .../AMDGPU/GlobalISel/inst-select-load-smrd.mir | 20 +- .../CodeGen/AMDGPU/GlobalISel/inst-select-lshr.mir | 30 +-- .../AMDGPU/GlobalISel/inst-select-lshr.s16.mir | 12 +- .../AMDGPU/GlobalISel/inst-select-merge-values.mir | 40 ++-- .../CodeGen/AMDGPU/GlobalISel/inst-select-mul.mir | 4 +- .../CodeGen/AMDGPU/GlobalISel/inst-select-or.mir | 66 +++--- .../CodeGen/AMDGPU/GlobalISel/inst-select-phi.mir | 92 ++++++-- .../AMDGPU/GlobalISel/inst-select-ptr-mask.mir | 22 +- .../AMDGPU/GlobalISel/inst-select-ptrtoint.mir | 4 +- .../AMDGPU/GlobalISel/inst-select-select.mir | 12 +- .../CodeGen/AMDGPU/GlobalISel/inst-select-sext.mir | 18 +- .../CodeGen/AMDGPU/GlobalISel/inst-select-shl.mir | 30 +-- .../AMDGPU/GlobalISel/inst-select-shl.s16.mir | 12 +- .../AMDGPU/GlobalISel/inst-select-sitofp.mir | 16 +- .../CodeGen/AMDGPU/GlobalISel/inst-select-smax.mir | 4 +- .../CodeGen/AMDGPU/GlobalISel/inst-select-smin.mir | 4 +- .../AMDGPU/GlobalISel/inst-select-smulh.mir | 4 +- .../CodeGen/AMDGPU/GlobalISel/inst-select-sub.mir | 6 +- .../AMDGPU/GlobalISel/inst-select-trunc.mir | 14 +- .../AMDGPU/GlobalISel/inst-select-uaddo.mir | 24 +- .../AMDGPU/GlobalISel/inst-select-uitofp.mir | 16 +- .../CodeGen/AMDGPU/GlobalISel/inst-select-umax.mir | 4 +- .../CodeGen/AMDGPU/GlobalISel/inst-select-umin.mir | 4 +- .../AMDGPU/GlobalISel/inst-select-umulh.mir | 4 +- .../GlobalISel/inst-select-unmerge-values.mir | 24 +- .../AMDGPU/GlobalISel/inst-select-usubo.mir | 24 +- .../CodeGen/AMDGPU/GlobalISel/inst-select-xor.mir | 66 +++--- .../CodeGen/AMDGPU/GlobalISel/inst-select-zext.mir | 18 +- .../llvm.amdgcn.raw.buffer.store.format.f16.ll | 176 +++++++-------- .../llvm.amdgcn.raw.buffer.store.format.f32.ll | 88 ++++---- .../GlobalISel/llvm.amdgcn.raw.buffer.store.ll | 242 ++++++++++----------- .../AMDGPU/buffer-intrinsics-mmo-offsets.ll | 46 ++-- .../CodeGen/AMDGPU/extract_subvector_vec4_vec3.ll | 7 +- llvm/test/CodeGen/AMDGPU/inline-constraints.ll | 5 +- .../CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll | 4 +- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll | 3 +- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll | 4 +- llvm/test/CodeGen/AMDGPU/read_register.ll | 4 +- 104 files changed, 974 insertions(+), 914 deletions(-) diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp index 0355f79..62904dc 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp @@ -636,7 +636,7 @@ MachineSDNode *AMDGPUDAGToDAGISel::buildSMovImm64(SDLoc &DL, uint64_t Imm, static unsigned selectSGPRVectorRegClassID(unsigned NumVectorElts) { switch (NumVectorElts) { case 1: - return AMDGPU::SReg_32_XM0RegClassID; + return AMDGPU::SReg_32RegClassID; case 2: return AMDGPU::SReg_64RegClassID; case 3: diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp index afdeacc..3cfa9d5 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp +++ 
b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp @@ -79,7 +79,9 @@ static bool isSCC(Register Reg, const MachineRegisterInfo &MRI) { if (RC) { // FIXME: This is ambiguous for wave32. This could be SCC or VCC, but the // context of the register bank has been lost. - if (RC->getID() != AMDGPU::SReg_32_XM0RegClassID) + // Has a hack: getRegClassForSizeOnBank uses exactly SGPR_32RegClass, which + // won't ever be constrained any further. + if (RC != &AMDGPU::SGPR_32RegClass) return false; const LLT Ty = MRI.getType(Reg); return Ty.isValid() && Ty.getSizeInBits() == 1; @@ -627,10 +629,8 @@ bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const { I.eraseFromParent(); - for (Register Reg : { DstReg, Src0Reg, Src1Reg }) { - if (!MRI->getRegClassOrNull(Reg)) - MRI->setRegClass(Reg, TRI.getWaveMaskRegClass()); - } + for (Register Reg : { DstReg, Src0Reg, Src1Reg }) + MRI->setRegClass(Reg, TRI.getWaveMaskRegClass()); return true; } @@ -1440,7 +1440,7 @@ bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const { .addImm(I.getOperand(1).getImm()); } else { const TargetRegisterClass *RC = IsSgpr ? - &AMDGPU::SReg_32_XM0RegClass : &AMDGPU::VGPR_32RegClass; + &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass; Register LoReg = MRI->createVirtualRegister(RC); Register HiReg = MRI->createVirtualRegister(RC); @@ -1571,7 +1571,8 @@ bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const { if (isSCC(CondReg, *MRI)) { CondPhysReg = AMDGPU::SCC; BrOpcode = AMDGPU::S_CBRANCH_SCC1; - ConstrainRC = &AMDGPU::SReg_32_XM0RegClass; + // FIXME: Hack for isSCC tests + ConstrainRC = &AMDGPU::SGPR_32RegClass; } else if (isVCC(CondReg, *MRI)) { // FIXME: Do we have to insert an and with exec here, like in SelectionDAG? // We sort of know that a VCC producer based on the register bank, that ands @@ -1936,7 +1937,7 @@ AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const { // failed trying to select this load into one of the _IMM variants since // the _IMM Patterns are considered before the _SGPR patterns. unsigned PtrReg = GEPInfo.SgprParts[0]; - Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg) .addImm(GEPInfo.Imm); return {{ diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index cb92932..8c61f3e 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -115,7 +115,7 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM, addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass); addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass); - addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass); + addRegisterClass(MVT::i32, &AMDGPU::SReg_32RegClass); addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass); addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass); @@ -141,12 +141,12 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM, addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass); if (Subtarget->has16BitInsts()) { - addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass); - addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass); + addRegisterClass(MVT::i16, &AMDGPU::SReg_32RegClass); + addRegisterClass(MVT::f16, &AMDGPU::SReg_32RegClass); // Unless there are also VOP3P operations, no operations are really legal.
- addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass); - addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass); + addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32RegClass); + addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32RegClass); addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass); addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass); } @@ -1821,25 +1821,25 @@ void SITargetLowering::allocateSystemSGPRs(CCState &CCInfo, bool IsShader) const { if (Info.hasWorkGroupIDX()) { unsigned Reg = Info.addWorkGroupIDX(); - MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass); + MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); CCInfo.AllocateReg(Reg); } if (Info.hasWorkGroupIDY()) { unsigned Reg = Info.addWorkGroupIDY(); - MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass); + MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); CCInfo.AllocateReg(Reg); } if (Info.hasWorkGroupIDZ()) { unsigned Reg = Info.addWorkGroupIDZ(); - MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass); + MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); CCInfo.AllocateReg(Reg); } if (Info.hasWorkGroupInfo()) { unsigned Reg = Info.addWorkGroupInfo(); - MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass); + MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); CCInfo.AllocateReg(Reg); } @@ -3603,22 +3603,22 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter( MachineOperand &Src0 = MI.getOperand(1); MachineOperand &Src1 = MI.getOperand(2); - Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); - Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); + Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI, Src0, BoolRC, AMDGPU::sub0, - &AMDGPU::SReg_32_XM0RegClass); + &AMDGPU::SReg_32RegClass); MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI, Src0, BoolRC, AMDGPU::sub1, - &AMDGPU::SReg_32_XM0RegClass); + &AMDGPU::SReg_32RegClass); MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI, Src1, BoolRC, AMDGPU::sub0, - &AMDGPU::SReg_32_XM0RegClass); + &AMDGPU::SReg_32RegClass); MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI, Src1, BoolRC, AMDGPU::sub1, - &AMDGPU::SReg_32_XM0RegClass); + &AMDGPU::SReg_32RegClass); bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); @@ -10562,7 +10562,7 @@ SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, return std::make_pair(0U, nullptr); case 32: case 16: - RC = &AMDGPU::SReg_32_XM0RegClass; + RC = &AMDGPU::SReg_32RegClass; break; case 64: RC = &AMDGPU::SGPR_64RegClass; diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp index 3b50f97..6bc2a0c 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -5201,8 +5201,8 @@ void SIInstrInfo::lowerScalarXnor(SetVectorType &Worklist, bool Src1IsSGPR = Src1.isReg() && RI.isSGPRClass(MRI.getRegClass(Src1.getReg())); MachineInstr *Xor; - Register Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); - Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + Register Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); + Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); // Build a pair of scalar instructions and add them to the work list. 
// The next iteration over the work list will lower these to the vector @@ -5246,8 +5246,8 @@ void SIInstrInfo::splitScalarNotBinop(SetVectorType &Worklist, MachineOperand &Src0 = Inst.getOperand(1); MachineOperand &Src1 = Inst.getOperand(2); - Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); - Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); + Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), Interm) .add(Src0) diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp index 39f1ff5..7005980 100644 --- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp @@ -1946,18 +1946,22 @@ SIRegisterInfo::getRegClassForSizeOnBank(unsigned Size, return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass : &AMDGPU::SReg_64_XEXECRegClass; case AMDGPU::SGPRRegBankID: - return &AMDGPU::SReg_32_XM0RegClass; + return &AMDGPU::SReg_32RegClass; case AMDGPU::SCCRegBankID: // This needs to return an allocatable class, so don't bother returning // the dummy SCC class. - return &AMDGPU::SReg_32_XM0RegClass; + // + // FIXME: This is a grotesque hack. We use SGPR_32 as an indication this + // was not a VCC bank value since we use the larger class SReg_32 for + // other values. These should all use SReg_32. + return &AMDGPU::SGPR_32RegClass; default: llvm_unreachable("unknown register bank"); } } case 32: return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VGPR_32RegClass : - &AMDGPU::SReg_32_XM0RegClass; + &AMDGPU::SReg_32RegClass; case 64: return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_64RegClass : &AMDGPU::SReg_64_XEXECRegClass; @@ -1982,7 +1986,7 @@ SIRegisterInfo::getRegClassForSizeOnBank(unsigned Size, default: if (Size < 32) return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VGPR_32RegClass : - &AMDGPU::SReg_32_XM0RegClass; + &AMDGPU::SReg_32RegClass; return nullptr; } } diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h index d861c76..adc3033 100644 --- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h +++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h @@ -268,7 +268,7 @@ public: const MachineRegisterInfo &MRI) const override; const TargetRegisterClass *getBoolRC() const { - return isWave32 ? &AMDGPU::SReg_32_XM0RegClass + return isWave32 ?
&AMDGPU::SReg_32RegClass : &AMDGPU::SReg_64RegClass; } diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-add.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-add.mir index a54eb3f..4393113 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-add.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-add.mir @@ -17,7 +17,7 @@ body: | ; GFX6: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX6: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX6: [[S_ADD_U32_:%[0-9]+]]:sreg_32_xm0 = S_ADD_U32 [[COPY]], [[COPY1]], implicit-def $scc + ; GFX6: [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY]], [[COPY1]], implicit-def $scc ; GFX6: %7:vgpr_32, dead %12:sreg_64_xexec = V_ADD_I32_e64 [[COPY2]], [[S_ADD_U32_]], 0, implicit $exec ; GFX6: %8:vgpr_32, dead %11:sreg_64_xexec = V_ADD_I32_e64 [[S_ADD_U32_]], %7, 0, implicit $exec ; GFX6: %9:vgpr_32, dead %10:sreg_64_xexec = V_ADD_I32_e64 %8, [[COPY2]], 0, implicit $exec @@ -26,7 +26,7 @@ body: | ; GFX9: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX9: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 ; GFX9: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX9: [[S_ADD_U32_:%[0-9]+]]:sreg_32_xm0 = S_ADD_U32 [[COPY]], [[COPY1]], implicit-def $scc + ; GFX9: [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY]], [[COPY1]], implicit-def $scc ; GFX9: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY2]], [[S_ADD_U32_]], 0, implicit $exec ; GFX9: [[V_ADD_U32_e64_1:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[S_ADD_U32_]], [[V_ADD_U32_e64_]], 0, implicit $exec ; GFX9: [[V_ADD_U32_e64_2:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[V_ADD_U32_e64_1]], [[COPY2]], 0, implicit $exec diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.class.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.class.mir index f14898e..e1abb29 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.class.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.class.mir @@ -13,16 +13,16 @@ body: | liveins: $sgpr0, $vgpr0 ; WAVE64-LABEL: name: class_s32_vcc_sv ; WAVE64: liveins: $sgpr0, $vgpr0 - ; WAVE64: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; WAVE64: [[V_CMP_CLASS_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_CLASS_F32_e64 0, [[COPY]], [[COPY1]], implicit $exec ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_CLASS_F32_e64_]] ; WAVE32-LABEL: name: class_s32_vcc_sv ; WAVE32: liveins: $sgpr0, $vgpr0 ; WAVE32: $vcc_hi = IMPLICIT_DEF - ; WAVE32: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; WAVE32: [[V_CMP_CLASS_F32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_CLASS_F32_e64 0, [[COPY]], [[COPY1]], implicit $exec + ; WAVE32: [[V_CMP_CLASS_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_CLASS_F32_e64 0, [[COPY]], [[COPY1]], implicit $exec ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_CLASS_F32_e64_]] %0:sgpr(s32) = COPY $sgpr0 %1:vgpr(s32) = COPY $vgpr0 @@ -42,15 +42,15 @@ body: | ; WAVE64-LABEL: name: class_s32_vcc_vs ; WAVE64: liveins: $sgpr0, $vgpr0 ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; WAVE64: [[V_CMP_CLASS_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_CLASS_F32_e64 0, [[COPY]], [[COPY1]], implicit $exec ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_CLASS_F32_e64_]] ; WAVE32-LABEL: name: class_s32_vcc_vs ; WAVE32: liveins: $sgpr0, $vgpr0 ; WAVE32: 
$vcc_hi = IMPLICIT_DEF ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 - ; WAVE32: [[V_CMP_CLASS_F32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_CLASS_F32_e64 0, [[COPY]], [[COPY1]], implicit $exec + ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; WAVE32: [[V_CMP_CLASS_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_CLASS_F32_e64 0, [[COPY]], [[COPY1]], implicit $exec ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_CLASS_F32_e64_]] %0:vgpr(s32) = COPY $vgpr0 %1:sgpr(s32) = COPY $sgpr0 @@ -78,7 +78,7 @@ body: | ; WAVE32: $vcc_hi = IMPLICIT_DEF ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; WAVE32: [[V_CMP_CLASS_F32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_CLASS_F32_e64 0, [[COPY]], [[COPY1]], implicit $exec + ; WAVE32: [[V_CMP_CLASS_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_CLASS_F32_e64 0, [[COPY]], [[COPY1]], implicit $exec ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_CLASS_F32_e64_]] %0:vgpr(s32) = COPY $vgpr0 %1:vgpr(s32) = COPY $vgpr1 @@ -106,7 +106,7 @@ body: | ; WAVE32: $vcc_hi = IMPLICIT_DEF ; WAVE32: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1 ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; WAVE32: [[V_CMP_CLASS_F64_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_CLASS_F64_e64 0, [[COPY]], [[COPY1]], implicit $exec + ; WAVE32: [[V_CMP_CLASS_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_CLASS_F64_e64 0, [[COPY]], [[COPY1]], implicit $exec ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_CLASS_F64_e64_]] %0:sgpr(s64) = COPY $sgpr0_sgpr1 %1:vgpr(s32) = COPY $vgpr0 @@ -127,15 +127,15 @@ body: | ; WAVE64-LABEL: name: class_s64_vcc_vs ; WAVE64: liveins: $sgpr0_sgpr1, $vgpr0 ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 - ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; WAVE64: [[V_CMP_CLASS_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_CLASS_F64_e64 0, [[COPY]], [[COPY1]], implicit $exec ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_CLASS_F64_e64_]] ; WAVE32-LABEL: name: class_s64_vcc_vs ; WAVE32: liveins: $sgpr0_sgpr1, $vgpr0 ; WAVE32: $vcc_hi = IMPLICIT_DEF ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 - ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 - ; WAVE32: [[V_CMP_CLASS_F64_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_CLASS_F64_e64 0, [[COPY]], [[COPY1]], implicit $exec + ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; WAVE32: [[V_CMP_CLASS_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_CLASS_F64_e64 0, [[COPY]], [[COPY1]], implicit $exec ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_CLASS_F64_e64_]] %0:vgpr(s64) = COPY $vgpr0_vgpr1 %1:sgpr(s32) = COPY $sgpr0 @@ -164,7 +164,7 @@ body: | ; WAVE32: $vcc_hi = IMPLICIT_DEF ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; WAVE32: [[V_CMP_CLASS_F64_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_CLASS_F64_e64 0, [[COPY]], [[COPY1]], implicit $exec + ; WAVE32: [[V_CMP_CLASS_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_CLASS_F64_e64 0, [[COPY]], [[COPY1]], implicit $exec ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_CLASS_F64_e64_]] %0:vgpr(s64) = COPY $vgpr0_vgpr1 %1:vgpr(s32) = COPY $vgpr2 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.class.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.class.s16.mir index 5443a04..9b2232d 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.class.s16.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.class.s16.mir @@ -21,16 +21,16 @@ body: | liveins: $sgpr0, $vgpr0 ; WAVE32-LABEL: name: class_s16_vcc_sv ; WAVE32: 
liveins: $sgpr0, $vgpr0 - ; WAVE32: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; WAVE32: [[V_CMP_CLASS_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_CLASS_F16_e64 0, [[COPY]], [[COPY1]], implicit $exec ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_CLASS_F16_e64_]] ; WAVE64-LABEL: name: class_s16_vcc_sv ; WAVE64: liveins: $sgpr0, $vgpr0 ; WAVE64: $vcc_hi = IMPLICIT_DEF - ; WAVE64: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; WAVE64: [[V_CMP_CLASS_F16_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_CLASS_F16_e64 0, [[COPY]], [[COPY1]], implicit $exec + ; WAVE64: [[V_CMP_CLASS_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_CLASS_F16_e64 0, [[COPY]], [[COPY1]], implicit $exec ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_CLASS_F16_e64_]] %0:sgpr(s32) = COPY $sgpr0 %1:vgpr(s32) = COPY $vgpr0 @@ -51,15 +51,15 @@ body: | ; WAVE32-LABEL: name: class_s16_vcc_vs ; WAVE32: liveins: $sgpr0, $vgpr0 ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; WAVE32: [[V_CMP_CLASS_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_CLASS_F16_e64 0, [[COPY]], [[COPY1]], implicit $exec ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_CLASS_F16_e64_]] ; WAVE64-LABEL: name: class_s16_vcc_vs ; WAVE64: liveins: $sgpr0, $vgpr0 ; WAVE64: $vcc_hi = IMPLICIT_DEF ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 - ; WAVE64: [[V_CMP_CLASS_F16_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_CLASS_F16_e64 0, [[COPY]], [[COPY1]], implicit $exec + ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; WAVE64: [[V_CMP_CLASS_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_CLASS_F16_e64 0, [[COPY]], [[COPY1]], implicit $exec ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_CLASS_F16_e64_]] %0:vgpr(s32) = COPY $vgpr0 %1:sgpr(s32) = COPY $sgpr0 @@ -88,7 +88,7 @@ body: | ; WAVE64: $vcc_hi = IMPLICIT_DEF ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; WAVE64: [[V_CMP_CLASS_F16_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_CLASS_F16_e64 0, [[COPY]], [[COPY1]], implicit $exec + ; WAVE64: [[V_CMP_CLASS_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_CLASS_F16_e64 0, [[COPY]], [[COPY1]], implicit $exec ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_CLASS_F16_e64_]] %0:vgpr(s32) = COPY $vgpr0 %1:vgpr(s32) = COPY $vgpr1 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.mir index 87dea1c..29e59cd 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.mir @@ -14,7 +14,7 @@ body: | ; CHECK-LABEL: name: cos_s32_vs ; CHECK: liveins: $sgpr0 - ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; CHECK: [[V_COS_F32_e64_:%[0-9]+]]:vgpr_32 = V_COS_F32_e64 0, [[COPY]], 0, 0, implicit $exec ; CHECK: S_ENDPGM 0, implicit [[V_COS_F32_e64_]] %0:sgpr(s32) = COPY $sgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.s16.mir index 3e865eb..a18242c 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.s16.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.s16.mir @@ -16,7 +16,7 @@ body: | ; CHECK-LABEL: name: cos_s16_vs ; CHECK: liveins: 
$sgpr0 - ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; CHECK: [[V_COS_F16_e64_:%[0-9]+]]:vgpr_32 = V_COS_F16_e64 0, [[COPY]], 0, 0, implicit $exec ; CHECK: S_ENDPGM 0, implicit [[V_COS_F16_e64_]] %0:sgpr(s32) = COPY $sgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pk.i16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pk.i16.mir index 01aded2..77efc97 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pk.i16.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pk.i16.mir @@ -13,7 +13,7 @@ body: | liveins: $sgpr0, $vgpr0 ; GCN-LABEL: name: cvt_pk_i16_vsv ; GCN: liveins: $sgpr0, $vgpr0 - ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GCN: [[V_CVT_PK_I16_I32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_PK_I16_I32_e64 [[COPY]], [[COPY1]], implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_CVT_PK_I16_I32_e64_]] @@ -36,7 +36,7 @@ body: | ; GCN-LABEL: name: cvt_pk_i16_vvs ; GCN: liveins: $sgpr0, $vgpr0 ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[V_CVT_PK_I16_I32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_PK_I16_I32_e64 [[COPY]], [[COPY1]], implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_CVT_PK_I16_I32_e64_]] %0:vgpr(s32) = COPY $vgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pk.u16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pk.u16.mir index 49dcb86..5d57933 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pk.u16.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pk.u16.mir @@ -13,7 +13,7 @@ body: | liveins: $sgpr0, $vgpr0 ; GCN-LABEL: name: cvt_pk_u16_vsv ; GCN: liveins: $sgpr0, $vgpr0 - ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GCN: [[V_CVT_PK_U16_U32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_PK_U16_U32_e64 [[COPY]], [[COPY1]], implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_CVT_PK_U16_U32_e64_]] @@ -36,7 +36,7 @@ body: | ; GCN-LABEL: name: cvt_pk_u16_vvs ; GCN: liveins: $sgpr0, $vgpr0 ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[V_CVT_PK_U16_U32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_PK_U16_U32_e64 [[COPY]], [[COPY1]], implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_CVT_PK_U16_U32_e64_]] %0:vgpr(s32) = COPY $vgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pknorm.i16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pknorm.i16.mir index 18266bb..9e19922 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pknorm.i16.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pknorm.i16.mir @@ -13,7 +13,7 @@ body: | liveins: $sgpr0, $vgpr0 ; GCN-LABEL: name: cvt_pknorm_i16_vsv ; GCN: liveins: $sgpr0, $vgpr0 - ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GCN: [[V_CVT_PKNORM_I16_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_PKNORM_I16_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_CVT_PKNORM_I16_F32_e64_]] @@ -36,7 +36,7 @@ body: | ; GCN-LABEL: name: cvt_pknorm_i16_vvs ; GCN: liveins: $sgpr0, 
$vgpr0 ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[V_CVT_PKNORM_I16_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_PKNORM_I16_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_CVT_PKNORM_I16_F32_e64_]] %0:vgpr(s32) = COPY $vgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pknorm.u16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pknorm.u16.mir index e105776..b3f1497 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pknorm.u16.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pknorm.u16.mir @@ -13,7 +13,7 @@ body: | liveins: $sgpr0, $vgpr0 ; GCN-LABEL: name: cvt_pknorm_u16_vsv ; GCN: liveins: $sgpr0, $vgpr0 - ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GCN: [[V_CVT_PKNORM_U16_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_PKNORM_U16_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_CVT_PKNORM_U16_F32_e64_]] @@ -36,7 +36,7 @@ body: | ; GCN-LABEL: name: cvt_pknorm_u16_vvs ; GCN: liveins: $sgpr0, $vgpr0 ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[V_CVT_PKNORM_U16_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_PKNORM_U16_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_CVT_PKNORM_U16_F32_e64_]] %0:vgpr(s32) = COPY $vgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir index a2a6c16..bc987b0 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir @@ -13,7 +13,7 @@ body: | liveins: $sgpr0, $vgpr0 ; GCN-LABEL: name: cvt_pkrtz_vsv ; GCN: liveins: $sgpr0, $vgpr0 - ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GCN: [[V_CVT_PKRTZ_F16_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_PKRTZ_F16_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_CVT_PKRTZ_F16_F32_e64_]] @@ -36,7 +36,7 @@ body: | ; GCN-LABEL: name: cvt_pkrtz_vvs ; GCN: liveins: $sgpr0, $vgpr0 ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[V_CVT_PKRTZ_F16_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_PKRTZ_F16_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_CVT_PKRTZ_F16_F32_e64_]] %0:vgpr(s32) = COPY $vgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmed3.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmed3.mir index 4166ff66..92f264f 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmed3.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmed3.mir @@ -37,7 +37,7 @@ body: | ; GCN-LABEL: name: fmed3_s32_vsvv ; GCN: liveins: $sgpr0, $vgpr0, $vgpr1 - ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 ; GCN: [[V_MED3_F32_:%[0-9]+]]:vgpr_32 = V_MED3_F32 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, 
implicit $exec @@ -62,7 +62,7 @@ body: | ; GCN-LABEL: name: fmed3_s32_vvsv ; GCN: liveins: $sgpr0, $vgpr0, $vgpr1 ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 ; GCN: [[V_MED3_F32_:%[0-9]+]]:vgpr_32 = V_MED3_F32 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_MED3_F32_]] @@ -87,7 +87,7 @@ body: | ; GCN: liveins: $sgpr0, $vgpr0, $vgpr1 ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GCN: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[V_MED3_F32_:%[0-9]+]]:vgpr_32 = V_MED3_F32 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_MED3_F32_]] %0:vgpr(s32) = COPY $vgpr0 @@ -111,7 +111,7 @@ body: | ; GCN-LABEL: name: fmed3_s32_vssv ; GCN: liveins: $sgpr0, $vgpr0 - ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GCN: [[V_MED3_F32_:%[0-9]+]]:vgpr_32 = V_MED3_F32 0, [[COPY]], 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_MED3_F32_]] @@ -133,7 +133,7 @@ body: | ; GCN-LABEL: name: fmed3_s32_vsvs ; GCN: liveins: $sgpr0, $vgpr0 - ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GCN: [[V_MED3_F32_:%[0-9]+]]:vgpr_32 = V_MED3_F32 0, [[COPY]], 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_MED3_F32_]] @@ -155,7 +155,7 @@ body: | ; GCN-LABEL: name: fmed3_s32_vvss ; GCN: liveins: $sgpr0, $vgpr0 - ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GCN: [[V_MED3_F32_:%[0-9]+]]:vgpr_32 = V_MED3_F32 0, [[COPY1]], 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_MED3_F32_]] @@ -177,7 +177,7 @@ body: | ; GCN-LABEL: name: fmed3_s32_vsss ; GCN: liveins: $sgpr0, $vgpr0 - ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[V_MED3_F32_:%[0-9]+]]:vgpr_32 = V_MED3_F32 0, [[COPY]], 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_MED3_F32_]] %0:sgpr(s32) = COPY $sgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmed3.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmed3.s16.mir index 2c9e079..c475651 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmed3.s16.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmed3.s16.mir @@ -45,7 +45,7 @@ body: | ; GCN-LABEL: name: fmed3_s16_vsvv ; GCN: liveins: $sgpr0, $vgpr0, $vgpr1 - ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 ; GCN: [[V_MED3_F16_:%[0-9]+]]:vgpr_32 = V_MED3_F16 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.mir index 98f415e1..479f146 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.mir @@ -14,7 +14,7 @@ 
body: | ; CHECK-LABEL: name: fract_s32_vs ; CHECK: liveins: $sgpr0 - ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; CHECK: [[V_FRACT_F32_e64_:%[0-9]+]]:vgpr_32 = V_FRACT_F32_e64 0, [[COPY]], 0, 0, implicit $exec ; CHECK: S_ENDPGM 0, implicit [[V_FRACT_F32_e64_]] %0:sgpr(s32) = COPY $sgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.s16.mir index 6fef185..ecae749 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.s16.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.s16.mir @@ -16,7 +16,7 @@ body: | ; CHECK-LABEL: name: fract_s16_vs ; CHECK: liveins: $sgpr0 - ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; CHECK: [[V_FRACT_F16_e64_:%[0-9]+]]:vgpr_32 = V_FRACT_F16_e64 0, [[COPY]], 0, 0, implicit $exec ; CHECK: S_ENDPGM 0, implicit [[V_FRACT_F16_e64_]] %0:sgpr(s32) = COPY $sgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.mir index 2c5a575..6c4b5d5 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.mir @@ -12,7 +12,7 @@ body: | liveins: $sgpr0, $vgpr0 ; GCN-LABEL: name: ldexp_s32_vsv ; GCN: liveins: $sgpr0, $vgpr0 - ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GCN: [[V_LDEXP_F32_e64_:%[0-9]+]]:vgpr_32 = V_LDEXP_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_LDEXP_F32_e64_]] @@ -34,7 +34,7 @@ body: | ; GCN-LABEL: name: ldexp_s32_vvs ; GCN: liveins: $sgpr0, $vgpr0 ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[V_LDEXP_F32_e64_:%[0-9]+]]:vgpr_32 = V_LDEXP_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_LDEXP_F32_e64_]] %0:vgpr(s32) = COPY $vgpr0 @@ -97,7 +97,7 @@ body: | ; GCN-LABEL: name: ldexp_s64_vvs ; GCN: liveins: $sgpr0_sgpr1, $vgpr0 ; GCN: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 - ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[V_LDEXP_F64_:%[0-9]+]]:vreg_64 = V_LDEXP_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_LDEXP_F64_]] %0:vgpr(s64) = COPY $vgpr0_vgpr1 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.s16.mir index cd9df36..ad63aa8 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.s16.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.s16.mir @@ -17,7 +17,7 @@ body: | liveins: $sgpr0, $vgpr0 ; GCN-LABEL: name: ldexp_s16_vsv ; GCN: liveins: $sgpr0, $vgpr0 - ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GCN: [[V_LDEXP_F16_e64_:%[0-9]+]]:vgpr_32 = V_LDEXP_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_LDEXP_F16_e64_]] @@ -40,7 +40,7 @@ body: | ; GCN-LABEL: name: ldexp_s16_vvs ; GCN: liveins: $sgpr0, $vgpr0 ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GCN: 
[[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[V_LDEXP_F16_e64_:%[0-9]+]]:vgpr_32 = V_LDEXP_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_LDEXP_F16_e64_]] %0:vgpr(s32) = COPY $vgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.mbcnt.lo.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.mbcnt.lo.mir index cafcfb9..08e4a66 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.mbcnt.lo.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.mbcnt.lo.mir @@ -25,7 +25,7 @@ body: | bb.0: liveins: $sgpr0, $vgpr0 ; GCN-LABEL: name: mbcnt_lo_sv - ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GCN: [[V_MBCNT_LO_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_LO_U32_B32_e64 [[COPY]], [[COPY1]], implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_MBCNT_LO_U32_B32_e64_]] @@ -45,7 +45,7 @@ body: | liveins: $sgpr0, $vgpr0 ; GCN-LABEL: name: smin_s32_vs ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[V_MBCNT_LO_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_LO_U32_B32_e64 [[COPY]], [[COPY1]], implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_MBCNT_LO_U32_B32_e64_]] %0:vgpr(s32) = COPY $vgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.legacy.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.legacy.mir index ac6a8be..def9d91 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.legacy.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.legacy.mir @@ -19,7 +19,7 @@ body: | ; CHECK-LABEL: name: rcp_legacy_s32_vs ; CHECK: liveins: $sgpr0 - ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; CHECK: [[V_RCP_LEGACY_F32_e64_:%[0-9]+]]:vgpr_32 = V_RCP_LEGACY_F32_e64 0, [[COPY]], 0, 0, implicit $exec ; CHECK: S_ENDPGM 0, implicit [[V_RCP_LEGACY_F32_e64_]] %0:sgpr(s32) = COPY $sgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.mir index 0c8d58b..cd061fc 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.mir @@ -14,7 +14,7 @@ body: | ; CHECK-LABEL: name: rcp_s32_vs ; CHECK: liveins: $sgpr0 - ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; CHECK: [[V_RCP_F32_e64_:%[0-9]+]]:vgpr_32 = V_RCP_F32_e64 0, [[COPY]], 0, 0, implicit $exec ; CHECK: S_ENDPGM 0, implicit [[V_RCP_F32_e64_]] %0:sgpr(s32) = COPY $sgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.s16.mir index 83371c2..90cf12e 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.s16.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.s16.mir @@ -16,7 +16,7 @@ body: | ; CHECK-LABEL: name: rcp_s16_vs ; CHECK: liveins: $sgpr0 - ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; CHECK: [[V_RCP_F16_e64_:%[0-9]+]]:vgpr_32 = V_RCP_F16_e64 0, [[COPY]], 0, 0, implicit $exec ; CHECK: S_ENDPGM 0, implicit [[V_RCP_F16_e64_]] %0:sgpr(s32) = COPY $sgpr0 diff --git 
a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.clamp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.clamp.mir index c183931..6e514d2 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.clamp.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.clamp.mir @@ -19,7 +19,7 @@ body: | ; CHECK-LABEL: name: rsq_clamp_s32_vs ; CHECK: liveins: $sgpr0 - ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; CHECK: [[V_RSQ_CLAMP_F32_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_CLAMP_F32_e64 0, [[COPY]], 0, 0, implicit $exec ; CHECK: S_ENDPGM 0, implicit [[V_RSQ_CLAMP_F32_e64_]] %0:sgpr(s32) = COPY $sgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.legacy.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.legacy.mir index 6d1040f..0df3078 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.legacy.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.legacy.mir @@ -19,7 +19,7 @@ body: | ; CHECK-LABEL: name: rsq_legacy_s32_vs ; CHECK: liveins: $sgpr0 - ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; CHECK: [[V_RSQ_LEGACY_F32_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_LEGACY_F32_e64 0, [[COPY]], 0, 0, implicit $exec ; CHECK: S_ENDPGM 0, implicit [[V_RSQ_LEGACY_F32_e64_]] %0:sgpr(s32) = COPY $sgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.mir index a3eb158..ae476d1 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.mir @@ -14,7 +14,7 @@ body: | ; CHECK-LABEL: name: rsq_s32_vs ; CHECK: liveins: $sgpr0 - ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; CHECK: [[V_RSQ_F32_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_F32_e64 0, [[COPY]], 0, 0, implicit $exec ; CHECK: S_ENDPGM 0, implicit [[V_RSQ_F32_e64_]] %0:sgpr(s32) = COPY $sgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.s16.mir index bddf369..be12c84 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.s16.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.s16.mir @@ -16,7 +16,7 @@ body: | ; CHECK-LABEL: name: rsq_s16_vs ; CHECK: liveins: $sgpr0 - ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; CHECK: [[V_RSQ_F16_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_F16_e64 0, [[COPY]], 0, 0, implicit $exec ; CHECK: S_ENDPGM 0, implicit [[V_RSQ_F16_e64_]] %0:sgpr(s32) = COPY $sgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.s.sendmsg.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.s.sendmsg.mir index 4129e7b..093109f 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.s.sendmsg.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.s.sendmsg.mir @@ -13,7 +13,7 @@ body: | ; GCN-LABEL: name: test_sendmsg ; GCN: liveins: $sgpr0 - ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: $m0 = COPY [[COPY]] ; GCN: S_SENDMSG 1, implicit $exec, implicit $m0 ; GCN: S_ENDPGM 0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sffbh.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sffbh.mir index 84e52db..4664b89 
100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sffbh.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sffbh.mir @@ -33,7 +33,7 @@ body: | ; CHECK-LABEL: name: sffbh_s32_vs ; CHECK: liveins: $sgpr0 - ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; CHECK: [[V_FFBH_I32_e64_:%[0-9]+]]:vgpr_32 = V_FFBH_I32_e64 [[COPY]], implicit $exec ; CHECK: S_ENDPGM 0, implicit [[V_FFBH_I32_e64_]] %0:sgpr(s32) = COPY $sgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.mir index 60bd142..b069bc7 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.mir @@ -14,7 +14,7 @@ body: | ; CHECK-LABEL: name: sin_s32_vs ; CHECK: liveins: $sgpr0 - ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; CHECK: [[V_SIN_F32_e64_:%[0-9]+]]:vgpr_32 = V_SIN_F32_e64 0, [[COPY]], 0, 0, implicit $exec ; CHECK: S_ENDPGM 0, implicit [[V_SIN_F32_e64_]] %0:sgpr(s32) = COPY $sgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.s16.mir index dcc5399..ff049d1 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.s16.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.s16.mir @@ -16,7 +16,7 @@ body: | ; CHECK-LABEL: name: sin_s16_vs ; CHECK: liveins: $sgpr0 - ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; CHECK: [[V_SIN_F16_e64_:%[0-9]+]]:vgpr_32 = V_SIN_F16_e64 0, [[COPY]], 0, 0, implicit $exec ; CHECK: S_ENDPGM 0, implicit [[V_SIN_F16_e64_]] %0:sgpr(s32) = COPY $sgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgpu-ffbh-u32.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgpu-ffbh-u32.mir index 026b664..dfc4df0 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgpu-ffbh-u32.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgpu-ffbh-u32.mir @@ -58,7 +58,7 @@ body: | ; CHECK-LABEL: name: ffbh_u32_v_s ; CHECK: liveins: $sgpr0 - ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; CHECK: [[V_FFBH_U32_e64_:%[0-9]+]]:vgpr_32 = V_FFBH_U32_e64 [[COPY]], implicit $exec ; CHECK: S_ENDPGM 0, implicit [[V_FFBH_U32_e64_]] %0:sgpr(s32) = COPY $sgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-and.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-and.mir index e449830..a9519b3 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-and.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-and.mir @@ -28,9 +28,9 @@ body: | ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 ; WAVE32: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec - ; WAVE32: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_EQ_U32_e64 [[COPY]], [[V_MOV_B32_e32_]], implicit $exec - ; WAVE32: [[V_CMP_EQ_U32_e64_1:%[0-9]+]]:sreg_32_xm0 = V_CMP_EQ_U32_e64 [[COPY1]], [[V_MOV_B32_e32_]], implicit $exec - ; WAVE32: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0 = S_AND_B32 [[V_CMP_EQ_U32_e64_]], [[V_CMP_EQ_U32_e64_1]] + ; WAVE32: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32 = V_CMP_EQ_U32_e64 [[COPY]], [[V_MOV_B32_e32_]], implicit $exec + ; WAVE32: [[V_CMP_EQ_U32_e64_1:%[0-9]+]]:sreg_32 = V_CMP_EQ_U32_e64 [[COPY1]], 
[[V_MOV_B32_e32_]], implicit $exec + ; WAVE32: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[V_CMP_EQ_U32_e64_]], [[V_CMP_EQ_U32_e64_1]] ; WAVE32: S_ENDPGM 0, implicit [[S_AND_B32_]] %0:vgpr(s32) = COPY $vgpr0 %1:vgpr(s32) = COPY $vgpr1 @@ -53,15 +53,15 @@ body: | liveins: $sgpr0, $sgpr1 ; WAVE64-LABEL: name: and_s1_sgpr_sgpr_sgpr ; WAVE64: liveins: $sgpr0, $sgpr1 - ; WAVE64: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 - ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr1 + ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 ; WAVE64: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], [[COPY1]] ; WAVE64: S_ENDPGM 0, implicit [[S_AND_B32_]] ; WAVE32-LABEL: name: and_s1_sgpr_sgpr_sgpr ; WAVE32: liveins: $sgpr0, $sgpr1 ; WAVE32: $vcc_hi = IMPLICIT_DEF - ; WAVE32: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 - ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr1 + ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 ; WAVE32: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], [[COPY1]] ; WAVE32: S_ENDPGM 0, implicit [[S_AND_B32_]] %0:sgpr(s32) = COPY $sgpr0 @@ -118,15 +118,15 @@ body: | liveins: $sgpr0, $sgpr1 ; WAVE64-LABEL: name: and_s16_sgpr_sgpr_sgpr ; WAVE64: liveins: $sgpr0, $sgpr1 - ; WAVE64: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 - ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr1 + ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 ; WAVE64: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], [[COPY1]] ; WAVE64: S_ENDPGM 0, implicit [[S_AND_B32_]] ; WAVE32-LABEL: name: and_s16_sgpr_sgpr_sgpr ; WAVE32: liveins: $sgpr0, $sgpr1 ; WAVE32: $vcc_hi = IMPLICIT_DEF - ; WAVE32: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 - ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr1 + ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 ; WAVE32: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], [[COPY1]] ; WAVE32: S_ENDPGM 0, implicit [[S_AND_B32_]] %0:sgpr(s32) = COPY $sgpr0 @@ -415,7 +415,7 @@ body: | ; WAVE64: S_ENDPGM 0, implicit [[S_AND_B64_]] ; WAVE32-LABEL: name: and_s1_vcc_undef_vcc_undef_vcc ; WAVE32: $vcc_hi = IMPLICIT_DEF - ; WAVE32: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0 = S_AND_B32 undef %1:sreg_32_xm0, undef %2:sreg_32_xm0 + ; WAVE32: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 undef %1:sreg_32, undef %2:sreg_32 ; WAVE32: S_ENDPGM 0, implicit [[S_AND_B32_]] %2:vcc(s1) = G_AND undef %0:vcc(s1), undef %1:vcc(s1) S_ENDPGM 0, implicit %2 @@ -483,9 +483,9 @@ body: | ; WAVE32: $vcc_hi = IMPLICIT_DEF ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; WAVE32: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec - ; WAVE32: [[V_CMP_NE_U32_e64_1:%[0-9]+]]:sreg_32_xm0 = V_CMP_NE_U32_e64 0, [[COPY1]], implicit $exec - ; WAVE32: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0 = S_AND_B32 [[V_CMP_NE_U32_e64_]], [[V_CMP_NE_U32_e64_1]] + ; WAVE32: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec + ; WAVE32: [[V_CMP_NE_U32_e64_1:%[0-9]+]]:sreg_32 = V_CMP_NE_U32_e64 0, [[COPY1]], implicit $exec + ; WAVE32: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[V_CMP_NE_U32_e64_]], [[V_CMP_NE_U32_e64_1]] ; WAVE32: S_ENDPGM 0, implicit [[S_AND_B32_]] %0:vgpr(s32) = COPY $vgpr0 %1:vgpr(s32) = COPY $vgpr1 @@ -513,22 +513,22 @@ body: | ; WAVE64-LABEL: name: copy_select_constrain_vcc_result_reg_wave32 ; 
WAVE64: liveins: $vgpr0 ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; WAVE64: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1 + ; WAVE64: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1 ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]] - ; WAVE64: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B32_]] - ; WAVE64: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY1]], [[COPY2]] - ; WAVE64: [[COPY3:%[0-9]+]]:sreg_64_xexec = COPY [[S_AND_B32_]] - ; WAVE64: [[COPY4:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY3]] - ; WAVE64: S_ENDPGM 0, implicit [[COPY4]] + ; WAVE64: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY1]], [[S_MOV_B32_]] + ; WAVE64: [[COPY2:%[0-9]+]]:sreg_64_xexec = COPY [[S_AND_B32_]] + ; WAVE64: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY2]] + ; WAVE64: S_ENDPGM 0, implicit [[COPY3]] ; WAVE32-LABEL: name: copy_select_constrain_vcc_result_reg_wave32 ; WAVE32: liveins: $vgpr0 ; WAVE32: $vcc_hi = IMPLICIT_DEF ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; WAVE32: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1 - ; WAVE32: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec - ; WAVE32: [[V_CMP_NE_U32_e64_1:%[0-9]+]]:sreg_32_xm0 = V_CMP_NE_U32_e64 0, [[S_MOV_B32_]], implicit $exec - ; WAVE32: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0 = S_AND_B32 [[V_CMP_NE_U32_e64_]], [[V_CMP_NE_U32_e64_1]] - ; WAVE32: S_ENDPGM 0, implicit [[S_AND_B32_]] + ; WAVE32: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1 + ; WAVE32: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec + ; WAVE32: [[V_CMP_NE_U32_e64_1:%[0-9]+]]:sreg_32 = V_CMP_NE_U32_e64 0, [[S_MOV_B32_]], implicit $exec + ; WAVE32: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[V_CMP_NE_U32_e64_]], [[V_CMP_NE_U32_e64_1]] + ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY [[S_AND_B32_]] + ; WAVE32: S_ENDPGM 0, implicit [[COPY1]] %1:vgpr(s32) = COPY $vgpr0 %0:vgpr(s1) = G_TRUNC %1(s32) %2:sgpr(s1) = G_CONSTANT i1 true @@ -555,7 +555,7 @@ body: | ; WAVE64-LABEL: name: copy_select_constrain_vcc_result_reg_wave64 ; WAVE64: liveins: $vgpr0 ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; WAVE64: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1 + ; WAVE64: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1 ; WAVE64: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec ; WAVE64: [[V_CMP_NE_U32_e64_1:%[0-9]+]]:sreg_64 = V_CMP_NE_U32_e64 0, [[S_MOV_B32_]], implicit $exec ; WAVE64: [[S_AND_B64_:%[0-9]+]]:sreg_64 = S_AND_B64 [[V_CMP_NE_U32_e64_]], [[V_CMP_NE_U32_e64_1]] @@ -565,12 +565,12 @@ body: | ; WAVE32: liveins: $vgpr0 ; WAVE32: $vcc_hi = IMPLICIT_DEF ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; WAVE32: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1 - ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]] - ; WAVE32: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B32_]] - ; WAVE32: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[COPY1]], [[COPY2]] - ; WAVE32: [[COPY3:%[0-9]+]]:sreg_64_xexec = COPY [[S_AND_B32_]] - ; WAVE32: S_ENDPGM 0, implicit [[COPY3]] + ; WAVE32: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1 + ; WAVE32: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec + ; WAVE32: [[V_CMP_NE_U32_e64_1:%[0-9]+]]:sreg_32 = V_CMP_NE_U32_e64 0, [[S_MOV_B32_]], implicit $exec + ; WAVE32: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_NE_U32_e64_]], [[V_CMP_NE_U32_e64_1]] + ; WAVE32: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY [[S_AND_B32_]] + ; WAVE32: S_ENDPGM 0, implicit [[COPY1]] %1:vgpr(s32) = COPY $vgpr0 
     %0:vgpr(s1) = G_TRUNC %1(s32)
     %2:sgpr(s1) = G_CONSTANT i1 true
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-anyext.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-anyext.mir
index 39f64b2..f1d04ba 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-anyext.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-anyext.mir
@@ -15,7 +15,7 @@ body: |
     ; GCN: S_CMP_EQ_U32 [[COPY]], [[COPY]], implicit-def $scc
     ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $scc
     ; GCN: $scc = COPY [[COPY1]]
-    ; GCN: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32_xm0 = S_CSELECT_B32 0, 1, implicit $scc
+    ; GCN: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 0, 1, implicit $scc
     ; GCN: $sgpr0 = COPY [[S_CSELECT_B32_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:scc(s1) = G_ICMP intpred(eq), %0, %0
@@ -55,7 +55,7 @@ body: |
     liveins: $sgpr0
     ; GCN-LABEL: name: anyext_sgpr_s1_to_sgpr_s32
-    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: $sgpr0 = COPY [[COPY]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s1) = G_TRUNC %0
@@ -73,7 +73,7 @@ body: |
     liveins: $sgpr0
     ; GCN-LABEL: name: anyext_sgpr_s1_to_sgpr_s64
-    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY [[COPY]]
     ; GCN: $sgpr0_sgpr1 = COPY [[COPY1]]
     %0:sgpr(s32) = COPY $sgpr0
@@ -92,7 +92,7 @@ body: |
     liveins: $sgpr0
     ; GCN-LABEL: name: anyext_sgpr_s8_to_sgpr_s32
-    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: $sgpr0 = COPY [[COPY]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s8) = G_TRUNC %0
@@ -111,7 +111,7 @@ body: |
     liveins: $sgpr0
     ; GCN-LABEL: name: anyext_sgpr_s16_to_sgpr_s32
-    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: $sgpr0 = COPY [[COPY]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
@@ -130,7 +130,7 @@ body: |
     liveins: $sgpr0
     ; GCN-LABEL: name: anyext_sgpr_s16_to_sgpr_s64
-    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY [[COPY]]
     ; GCN: $sgpr0_sgpr1 = COPY [[COPY1]]
     %0:sgpr(s32) = COPY $sgpr0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir
index f6aafd4..2c48572 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir
@@ -54,28 +54,28 @@ body: |
   bb.0:
     liveins: $sgpr0, $vgpr0
     ; GFX6-LABEL: name: ashr_s32_sv
-    ; GFX6: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX6: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[V_ASHRREV_I32_e64_:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e64 [[COPY1]], [[COPY]], implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ASHRREV_I32_e64_]]
     ; GFX7-LABEL: name: ashr_s32_sv
-    ; GFX7: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX7: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX7: [[V_ASHRREV_I32_e64_:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e64 [[COPY1]], [[COPY]], implicit $exec
     ; GFX7: S_ENDPGM 0, implicit [[V_ASHRREV_I32_e64_]]
     ; GFX8-LABEL: name: ashr_s32_sv
-    ; GFX8: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX8: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX8: [[V_ASHRREV_I32_e64_:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e64 [[COPY1]], [[COPY]], implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_ASHRREV_I32_e64_]]
     ; GFX9-LABEL: name: ashr_s32_sv
-    ; GFX9: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX9: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9: [[V_ASHRREV_I32_e64_:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e64 [[COPY1]], [[COPY]], implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_ASHRREV_I32_e64_]]
     ; GFX10-LABEL: name: ashr_s32_sv
     ; GFX10: $vcc_hi = IMPLICIT_DEF
-    ; GFX10: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX10: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX10: [[V_ASHRREV_I32_e64_:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e64 [[COPY1]], [[COPY]], implicit $exec
     ; GFX10: S_ENDPGM 0, implicit [[V_ASHRREV_I32_e64_]]
@@ -95,28 +95,28 @@ body: |
     liveins: $sgpr0, $vgpr0
     ; GFX6-LABEL: name: ashr_s32_vs
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX6: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX6: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX6: [[V_ASHRREV_I32_e64_:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e64 [[COPY1]], [[COPY]], implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ASHRREV_I32_e64_]]
     ; GFX7-LABEL: name: ashr_s32_vs
     ; GFX7: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX7: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX7: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX7: [[V_ASHRREV_I32_e64_:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e64 [[COPY1]], [[COPY]], implicit $exec
     ; GFX7: S_ENDPGM 0, implicit [[V_ASHRREV_I32_e64_]]
     ; GFX8-LABEL: name: ashr_s32_vs
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX8: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX8: [[V_ASHRREV_I32_e64_:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e64 [[COPY1]], [[COPY]], implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_ASHRREV_I32_e64_]]
     ; GFX9-LABEL: name: ashr_s32_vs
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX9: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX9: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX9: [[V_ASHRREV_I32_e64_:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e64 [[COPY1]], [[COPY]], implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_ASHRREV_I32_e64_]]
     ; GFX10-LABEL: name: ashr_s32_vs
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX10: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX10: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX10: [[V_ASHRREV_I32_e64_:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e64 [[COPY1]], [[COPY]], implicit $exec
     ; GFX10: S_ENDPGM 0, implicit [[V_ASHRREV_I32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
@@ -255,28 +255,28 @@ body: |
     liveins: $sgpr0, $vgpr0_vgpr1
     ; GFX6-LABEL: name: ashr_s64_vs
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; GFX6: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX6: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX6: [[V_ASHR_I64_:%[0-9]+]]:vreg_64 = V_ASHR_I64 [[COPY]], [[COPY1]], implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ASHR_I64_]]
     ; GFX7-LABEL: name: ashr_s64_vs
     ; GFX7: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; GFX7: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX7: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX7: [[V_ASHR_I64_:%[0-9]+]]:vreg_64 = V_ASHR_I64 [[COPY]], [[COPY1]], implicit $exec
     ; GFX7: S_ENDPGM 0, implicit [[V_ASHR_I64_]]
     ; GFX8-LABEL: name: ashr_s64_vs
     ; GFX8: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; GFX8: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX8: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX8: [[V_ASHRREV_I64_:%[0-9]+]]:vreg_64 = V_ASHRREV_I64 [[COPY1]], [[COPY]], implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_ASHRREV_I64_]]
     ; GFX9-LABEL: name: ashr_s64_vs
     ; GFX9: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; GFX9: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX9: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX9: [[V_ASHRREV_I64_:%[0-9]+]]:vreg_64 = V_ASHRREV_I64 [[COPY1]], [[COPY]], implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_ASHRREV_I64_]]
     ; GFX10-LABEL: name: ashr_s64_vs
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; GFX10: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX10: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX10: [[V_ASHR_I64_:%[0-9]+]]:vreg_64 = V_ASHR_I64 [[COPY]], [[COPY1]], implicit $exec
     ; GFX10: S_ENDPGM 0, implicit [[V_ASHR_I64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.s16.mir
index e0f94a6..b3402a60 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.s16.mir
@@ -66,18 +66,18 @@ body: |
     liveins: $sgpr0, $vgpr0
     ; GFX8-LABEL: name: ashr_s16_s16_vs
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX8: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX8: [[V_ASHRREV_I16_e64_:%[0-9]+]]:vgpr_32 = V_ASHRREV_I16_e64 [[COPY1]], [[COPY]], implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_ASHRREV_I16_e64_]]
     ; GFX9-LABEL: name: ashr_s16_s16_vs
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX9: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX9: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX9: [[V_ASHRREV_I16_e64_:%[0-9]+]]:vgpr_32 = V_ASHRREV_I16_e64 [[COPY1]], [[COPY]], implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_ASHRREV_I16_e64_]]
     ; GFX10-LABEL: name: ashr_s16_s16_vs
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX10: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX10: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX10: [[V_ASHRREV_I16_e64_:%[0-9]+]]:vgpr_32 = V_ASHRREV_I16_e64 [[COPY1]], [[COPY]], implicit $exec
     ; GFX10: S_ENDPGM 0, implicit [[V_ASHRREV_I16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
@@ -310,18 +310,18 @@ body: |
   bb.0:
     liveins: $sgpr0, $vgpr0
     ; GFX8-LABEL: name: ashr_s16_s16_sv
-    ; GFX8: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX8: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX8: [[V_ASHRREV_I16_e64_:%[0-9]+]]:vgpr_32 = V_ASHRREV_I16_e64 [[COPY1]], [[COPY]], implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_ASHRREV_I16_e64_]]
     ; GFX9-LABEL: name: ashr_s16_s16_sv
-    ; GFX9: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX9: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9: [[V_ASHRREV_I16_e64_:%[0-9]+]]:vgpr_32 = V_ASHRREV_I16_e64 [[COPY1]], [[COPY]], implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_ASHRREV_I16_e64_]]
     ; GFX10-LABEL: name: ashr_s16_s16_sv
     ; GFX10: $vcc_hi = IMPLICIT_DEF
-    ; GFX10: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX10: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX10: [[V_ASHRREV_I16_e64_:%[0-9]+]]:vgpr_32 = V_ASHRREV_I16_e64 [[COPY1]], [[COPY]], implicit $exec
     ; GFX10: S_ENDPGM 0, implicit [[V_ASHRREV_I16_e64_]]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-bitreverse.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-bitreverse.mir
index 515ad6e..a99e602 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-bitreverse.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-bitreverse.mir
@@ -44,7 +44,7 @@ body: |
   bb.0:
     liveins: $sgpr0
     ; CHECK-LABEL: name: bitreverse_i32_vs
-    ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; CHECK: [[V_BFREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_BFREV_B32_e64 [[COPY]], implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_BFREV_B32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-brcond.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-brcond.mir
index 7c2e3c0..c6d8795 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-brcond.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-brcond.mir
@@ -20,7 +20,7 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GCN: S_CMP_EQ_U32 [[COPY]], [[COPY1]], implicit-def $scc
-    ; GCN: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+    ; GCN: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $scc
     ; GCN: $scc = COPY [[COPY2]]
     ; GCN: S_CBRANCH_SCC1 %bb.1, implicit $scc
     ; GCN: bb.1:
@@ -46,7 +46,7 @@ body: |
     ; GCN-LABEL: name: brcond_scc_impdef
     ; GCN: bb.0:
     ; GCN: successors: %bb.1(0x80000000)
-    ; GCN: [[DEF:%[0-9]+]]:sreg_32_xm0 = IMPLICIT_DEF
+    ; GCN: [[DEF:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
     ; GCN: $scc = COPY [[DEF]]
     ; GCN: S_CBRANCH_SCC1 %bb.1, implicit $scc
     ; GCN: bb.1:
@@ -70,7 +70,7 @@ body: |
     ; GCN-LABEL: name: brcond_scc_undef
     ; GCN: bb.0:
     ; GCN: successors: %bb.1(0x80000000)
-    ; GCN: $scc = COPY %0:sreg_32_xm0
+    ; GCN: $scc = COPY %0:sgpr_32
     ; GCN: S_CBRANCH_SCC1 %bb.1, implicit $scc
     ; GCN: bb.1:
   bb.0:
@@ -95,7 +95,7 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GCN: S_CMP_EQ_U32 [[COPY]], [[COPY1]], implicit-def $scc
-    ; GCN: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+    ; GCN: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $scc
     ; GCN: $scc = COPY [[COPY2]]
     ; GCN: S_CBRANCH_SCC1 %bb.1, implicit $scc
     ; GCN: S_BRANCH %bb.1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-build-vector.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-build-vector.mir
index 5763a87..a63f475 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-build-vector.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-build-vector.mir
@@ -35,7 +35,7 @@ body: |
     ; GCN-LABEL: name: test_build_vector_v_v2s32_s_s32_v_s32
     ; GCN: liveins: $sgpr0, $vgpr0
-    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
     ; GCN: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
@@ -58,7 +58,7 @@ body: |
     ; GCN-LABEL: name: test_build_vector_v_v2s32_v_s32_s_s32
     ; GCN: liveins: $sgpr0, $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
     ; GCN: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
     %0:vgpr(s32) = COPY $vgpr0
@@ -79,8 +79,8 @@ body: |
     ; GCN-LABEL: name: test_build_vector_s_v2s32_s_s32_s_s32
     ; GCN: liveins: $sgpr0, $sgpr1
-    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr1
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+    ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
     ; GCN: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
     %0:sgpr(s32) = COPY $sgpr0
@@ -101,8 +101,8 @@ body: |
     ; GCN-LABEL: name: test_build_vector_s_v2s32_undef_s_s32_s_s32
     ; GCN: liveins: $sgpr0
-    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE undef %2:sreg_32_xm0, %subreg.sub0, [[COPY]], %subreg.sub1
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE undef %2:sreg_32, %subreg.sub0, [[COPY]], %subreg.sub1
     ; GCN: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
     %1:sgpr(s32) = COPY $sgpr0
     %2:sgpr(<2 x s32>) = G_BUILD_VECTOR undef %0:sgpr(s32), %1
@@ -121,8 +121,8 @@ body: |
     ; GCN-LABEL: name: test_build_vector_s_v2s32_s_s32_undef_s_s32
     ; GCN: liveins: $sgpr0
-    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[COPY]], %subreg.sub0, undef %2:sreg_32_xm0, %subreg.sub1
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+    ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[COPY]], %subreg.sub0, undef %2:sreg_32, %subreg.sub1
     ; GCN: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
     %0:sgpr(s32) = COPY $sgpr0
     %2:sgpr(<2 x s32>) = G_BUILD_VECTOR %0, undef %1:sgpr(s32)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-concat-vectors.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-concat-vectors.mir
index d335ce8..838edbb 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-concat-vectors.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-concat-vectors.mir
@@ -38,7 +38,7 @@ body: |
     liveins: $sgpr0, $vgpr0
     ; GCN-LABEL: name: test_concat_vectors_v_v4s16_s_v2s16_v_v2s16
-    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GCN: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
     ; GCN: $vgpr0_vgpr1 = COPY [[REG_SEQUENCE]]
@@ -59,7 +59,7 @@ body: |
     ; GCN-LABEL: name: test_concat_vectors_v_v4s16_v_v2s16_s_v2s16
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
     ; GCN: $vgpr0_vgpr1 = COPY [[REG_SEQUENCE]]
     %0:vgpr(<2 x s16>) = COPY $vgpr0
@@ -78,8 +78,8 @@ body: |
     liveins: $sgpr0, $sgpr1
     ; GCN-LABEL: name: test_concat_vectors_s_v4s16_s_v2s16_s_v2s16
-    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr1
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+    ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
     ; GCN: $sgpr0_sgpr1 = COPY [[REG_SEQUENCE]]
     %0:sgpr(<2 x s16>) = COPY $sgpr0
@@ -98,9 +98,9 @@ body: |
     liveins: $sgpr0, $sgpr1, $sgpr2
     ; GCN-LABEL: name: test_concat_vectors_s_s96_s_v2s16_s_v2s16_s_v2s16
-    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr1
-    ; GCN: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+    ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+    ; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
     ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2
     ; GCN: $sgpr0_sgpr1_sgpr2 = COPY [[REG_SEQUENCE]]
     %0:sgpr(<2 x s16>) = COPY $sgpr0
@@ -142,10 +142,10 @@ body: |
     liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3
     ; GCN-LABEL: name: test_concat_vectors_s_v8s16_s_v2s16_s_v2s16_s_v2s16_s_v2s16
-    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr1
-    ; GCN: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-    ; GCN: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+    ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+    ; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+    ; GCN: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
     ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
     ; GCN: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[REG_SEQUENCE]]
     %0:sgpr(<2 x s16>) = COPY $sgpr0
@@ -230,11 +230,11 @@ body: |
     liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4
     ; GCN-LABEL: name: test_concat_vectors_s_s160_s_v2s16_s_v2s16_s_v2s16_s_v2s16_s_v2s16
-    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr1
-    ; GCN: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-    ; GCN: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-    ; GCN: [[COPY4:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+    ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+    ; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+    ; GCN: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+    ; GCN: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
     ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_160 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3, [[COPY4]], %subreg.sub4
     ; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4 = COPY [[REG_SEQUENCE]]
     %0:sgpr(<2 x s16>) = COPY $sgpr0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-constant.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-constant.mir
index 2acf1ae..065cf9e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-constant.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-constant.mir
@@ -18,8 +18,8 @@ body: |
     ; GCN: %{{[0-9]+}}:sreg_32 = S_MOV_B32 1
     %2:sreg_32(s32) = G_CONSTANT i32 1
-    ; GCN: [[LO0:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 0
-    ; GCN: [[HI0:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1
+    ; GCN: [[LO0:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+    ; GCN: [[HI0:%[0-9]+]]:sreg_32 = S_MOV_B32 1
     ; GCN: %{{[0-9]+}}:sreg_64_xexec = REG_SEQUENCE [[LO0]], %subreg.sub0, [[HI0]], %subreg.sub1
     %3:sgpr(s64) = G_CONSTANT i64 4294967296
@@ -29,8 +29,8 @@
     ; GCN: %5:sreg_64_xexec = S_MOV_B64 4607182418800017408
     %5:sgpr(s64) = G_FCONSTANT double 1.0
-    ; GCN: [[LO1:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 0
-    ; GCN: [[HI1:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1076101120
+    ; GCN: [[LO1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+    ; GCN: [[HI1:%[0-9]+]]:sreg_32 = S_MOV_B32 1076101120
     ; GCN: %{{[0-9]+}}:sreg_64_xexec = REG_SEQUENCE [[LO1]], %subreg.sub0, [[HI1]], %subreg.sub1
     %6:sgpr(s64) = G_FCONSTANT double 10.0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy.mir
index 558f672..53fbcb4 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy.mir
@@ -43,7 +43,7 @@ body: |
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
     ; WAVE64: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr3
-    ; WAVE64: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+    ; WAVE64: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $scc
     ; WAVE64: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_NE_U32_e64 0, [[COPY3]], implicit $exec
     ; WAVE64: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, [[COPY2]], 0, [[COPY1]], [[V_CMP_NE_U32_e64_]], implicit $exec
     ; WAVE64: FLAT_STORE_DWORD [[COPY]], [[V_CNDMASK_B32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
@@ -52,7 +52,7 @@ body: |
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
     ; WAVE32: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr3
-    ; WAVE32: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+    ; WAVE32: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $scc
     ; WAVE32: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_NE_U32_e64 0, [[COPY3]], implicit $exec
     ; WAVE32: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, [[COPY2]], 0, [[COPY1]], [[V_CMP_NE_U32_e64_]], implicit $exec
     ; WAVE32: GLOBAL_STORE_DWORD [[COPY]], [[V_CNDMASK_B32_e64_]], 0, 0, 0, 0, implicit $exec :: (store 4, addrspace 1)
@@ -78,7 +78,7 @@ body: |
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
     ; WAVE64: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr3
-    ; WAVE64: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+    ; WAVE64: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $scc
     ; WAVE64: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_NE_U32_e64 0, [[COPY3]], implicit $exec
     ; WAVE64: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, [[COPY2]], 0, [[COPY1]], [[V_CMP_NE_U32_e64_]], implicit $exec
     ; WAVE64: [[V_CMP_NE_U32_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_NE_U32_e64 0, [[COPY3]], implicit $exec
@@ -89,7 +89,7 @@ body: |
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
     ; WAVE32: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr3
-    ; WAVE32: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+    ; WAVE32: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $scc
     ; WAVE32: [[COPY4:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[COPY3]]
     ; WAVE32: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, [[COPY2]], 0, [[COPY1]], [[COPY4]], implicit $exec
     ; WAVE32: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_NE_U32_e64 0, [[COPY3]], implicit $exec
@@ -218,7 +218,7 @@ body: |
     ; WAVE64-LABEL: name: copy_sgpr_s1_to_vcc
     ; WAVE64: bb.0:
     ; WAVE64: successors: %bb.1(0x80000000)
-    ; WAVE64: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; WAVE64: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec
     ; WAVE64: $vcc = COPY [[V_CMP_NE_U32_e64_]]
     ; WAVE64: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
@@ -227,8 +227,8 @@ body: |
     ; WAVE32: bb.0:
     ; WAVE32: successors: %bb.1(0x80000000)
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
-    ; WAVE32: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; WAVE32: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec
+    ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+    ; WAVE32: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec
     ; WAVE32: $vcc_lo = COPY [[V_CMP_NE_U32_e64_]]
     ; WAVE32: S_CBRANCH_VCCNZ %bb.1, implicit $vcc_lo
     ; WAVE32: bb.1:
@@ -254,13 +254,13 @@ body: |
   bb.0:
     liveins: $sgpr0_sgpr1
     ; WAVE64-LABEL: name: copy_sgpr_s1_to_vcc_constrain
-    ; WAVE64: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; WAVE64: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NE_U32_e64_]]
     ; WAVE32-LABEL: name: copy_sgpr_s1_to_vcc_constrain
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
-    ; WAVE32: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; WAVE32: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec
+    ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+    ; WAVE32: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NE_U32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s1) = G_TRUNC %0
@@ -281,14 +281,14 @@ body: |
     liveins: $sgpr0_sgpr1
     ; WAVE64-LABEL: name: copy_s1_vcc_to_vcc
-    ; WAVE64: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; WAVE64: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec
     ; WAVE64: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY [[V_CMP_NE_U32_e64_]]
     ; WAVE64: S_ENDPGM 0, implicit [[COPY1]]
     ; WAVE32-LABEL: name: copy_s1_vcc_to_vcc
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
-    ; WAVE32: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; WAVE32: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec
+    ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+    ; WAVE32: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec
     ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[V_CMP_NE_U32_e64_]]
     ; WAVE32: S_ENDPGM 0, implicit [[COPY1]]
     %0:sgpr(s32) = COPY $sgpr0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ctpop.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ctpop.mir
index b44e947..721eac0 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ctpop.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ctpop.mir
@@ -33,7 +33,7 @@ body: |
     ; CHECK-LABEL: name: ctpop_s32_vs
     ; CHECK: liveins: $sgpr0
-    ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; CHECK: [[V_BCNT_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_BCNT_U32_B32_e64 [[COPY]], 0, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_BCNT_U32_B32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
@@ -145,7 +145,7 @@ body: |
     ; CHECK-LABEL: name: add_ctpop_s32_v_vs_commute0
     ; CHECK: liveins: $vgpr0, $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; CHECK: [[V_BCNT_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_BCNT_U32_B32_e64 [[COPY]], [[COPY1]], implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_BCNT_U32_B32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
@@ -169,7 +169,7 @@ body: |
     ; CHECK-LABEL: name: add_ctpop_s32_v_sv_commute0
     ; CHECK: liveins: $vgpr0, $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; CHECK: [[V_BCNT_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_BCNT_U32_B32_e64 [[COPY1]], [[COPY]], implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_BCNT_U32_B32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
@@ -192,7 +192,7 @@ body: |
     ; CHECK-LABEL: name: add_ctpop_s32_s_sv_commute0
     ; CHECK: liveins: $sgpr0, $vgpr0
-    ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; CHECK: [[V_BCNT_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_BCNT_U32_B32_e64 [[COPY]], [[COPY1]], implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_BCNT_U32_B32_e64_]]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-extract.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-extract.mir
index 13e5c94..0cf388d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-extract.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-extract.mir
@@ -9,22 +9,22 @@ body: |
   bb.0:
     ; CHECK-LABEL: name: extract512
     ; CHECK: [[DEF:%[0-9]+]]:sreg_512 = IMPLICIT_DEF
-    ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub0
-    ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub1
-    ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub2
-    ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub3
-    ; CHECK: [[COPY4:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub4
-    ; CHECK: [[COPY5:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub5
-    ; CHECK: [[COPY6:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub6
-    ; CHECK: [[COPY7:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub7
-    ; CHECK: [[COPY8:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub8
-    ; CHECK: [[COPY9:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub9
-    ; CHECK: [[COPY10:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub10
-    ; CHECK: [[COPY11:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub11
-    ; CHECK: [[COPY12:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub12
-    ; CHECK: [[COPY13:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub13
-    ; CHECK: [[COPY14:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub14
-    ; CHECK: [[COPY15:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub15
+    ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub0
+    ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub1
+    ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub2
+    ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub3
+    ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub4
+    ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub5
+    ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub6
+    ; CHECK: [[COPY7:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub7
+    ; CHECK: [[COPY8:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub8
+    ; CHECK: [[COPY9:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub9
+    ; CHECK: [[COPY10:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub10
+    ; CHECK: [[COPY11:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub11
+    ; CHECK: [[COPY12:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub12
+    ; CHECK: [[COPY13:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub13
+    ; CHECK: [[COPY14:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub14
+    ; CHECK: [[COPY15:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub15
     ; CHECK: $sgpr0 = COPY [[COPY]]
     ; CHECK: $sgpr1 = COPY [[COPY1]]
     ; CHECK: $sgpr2 = COPY [[COPY2]]
@@ -87,38 +87,38 @@ body: |
   bb.0:
     ; CHECK-LABEL: name: extract_s_s32_s1024
    ; CHECK: [[DEF:%[0-9]+]]:sreg_1024 = IMPLICIT_DEF
-    ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub0
-    ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub1
-    ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub2
-    ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub3
-    ; CHECK: [[COPY4:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub4
-    ; CHECK: [[COPY5:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub5
-    ; CHECK: [[COPY6:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub6
-    ; CHECK: [[COPY7:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub7
-    ; CHECK: [[COPY8:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub8
-    ; CHECK: [[COPY9:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub9
-    ; CHECK: [[COPY10:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub10
-    ; CHECK: [[COPY11:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub11
-    ; CHECK: [[COPY12:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub12
-    ; CHECK: [[COPY13:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub13
-    ; CHECK: [[COPY14:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub14
-    ; CHECK: [[COPY15:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub15
-    ; CHECK: [[COPY16:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub16
-    ; CHECK: [[COPY17:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub17
-    ; CHECK: [[COPY18:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub18
-    ; CHECK: [[COPY19:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub19
-    ; CHECK: [[COPY20:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub20
-    ; CHECK: [[COPY21:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub21
-    ; CHECK: [[COPY22:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub22
-    ; CHECK: [[COPY23:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub23
-    ; CHECK: [[COPY24:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub24
-    ; CHECK: [[COPY25:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub25
-    ; CHECK: [[COPY26:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub26
-    ; CHECK: [[COPY27:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub27
-    ; CHECK: [[COPY28:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub28
-    ; CHECK: [[COPY29:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub29
-    ; CHECK: [[COPY30:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub30
-    ; CHECK: [[COPY31:%[0-9]+]]:sreg_32_xm0 = COPY [[DEF]].sub31
+    ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub0
+    ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub1
+    ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub2
+    ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub3
+    ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub4
+    ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub5
+    ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub6
+    ; CHECK: [[COPY7:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub7
+    ; CHECK: [[COPY8:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub8
+    ; CHECK: [[COPY9:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub9
+    ; CHECK: [[COPY10:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub10
+    ; CHECK: [[COPY11:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub11
+    ; CHECK: [[COPY12:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub12
+    ; CHECK: [[COPY13:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub13
+    ; CHECK: [[COPY14:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub14
+    ; CHECK: [[COPY15:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub15
+    ; CHECK: [[COPY16:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub16
+    ; CHECK: [[COPY17:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub17
+    ; CHECK: [[COPY18:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub18
+    ; CHECK: [[COPY19:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub19
+    ; CHECK: [[COPY20:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub20
+    ; CHECK: [[COPY21:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub21
+    ; CHECK: [[COPY22:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub22
+    ; CHECK: [[COPY23:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub23
+    ; CHECK: [[COPY24:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub24
+    ; CHECK: [[COPY25:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub25
+    ; CHECK: [[COPY26:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub26
+    ; CHECK: [[COPY27:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub27
+    ; CHECK: [[COPY28:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub28
+    ; CHECK: [[COPY29:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub29
+    ; CHECK: [[COPY30:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub30
+    ; CHECK: [[COPY31:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub31
     ; CHECK: S_ENDPGM 0, implicit [[DEF]], implicit [[COPY]], implicit [[COPY1]], implicit [[COPY2]], implicit [[COPY3]], implicit [[COPY4]], implicit [[COPY5]], implicit [[COPY6]], implicit [[COPY7]], implicit [[COPY8]], implicit [[COPY9]], implicit [[COPY10]], implicit [[COPY11]], implicit [[COPY12]], implicit [[COPY13]], implicit [[COPY14]], implicit [[COPY15]], implicit [[COPY16]], implicit [[COPY17]], implicit [[COPY18]], implicit [[COPY19]], implicit [[COPY20]], implicit [[COPY21]], implicit [[COPY22]], implicit [[COPY23]], implicit [[COPY24]], implicit [[COPY25]], implicit [[COPY26]], implicit [[COPY27]], implicit [[COPY28]], implicit [[COPY29]], implicit [[COPY30]], implicit [[COPY31]]
     %0:sgpr(s1024) = G_IMPLICIT_DEF
     %1:sgpr(s32) = G_EXTRACT %0:sgpr, 0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fabs.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fabs.mir
index 56e9e98..8bdb565 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fabs.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fabs.mir
@@ -16,7 +16,7 @@ body: |
     ; GCN: liveins: $sgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 2147483647
-    ; GCN: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0 = S_AND_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
+    ; GCN: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
     ; GCN: $sgpr0 = COPY [[S_AND_B32_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s32) = G_FABS %0
@@ -75,7 +75,7 @@ body: |
     ; GCN: liveins: $sgpr0_sgpr1
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 2147450879
-    ; GCN: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0 = S_AND_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
+    ; GCN: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
     ; GCN: $sgpr0 = COPY [[S_AND_B32_]]
     %0:sgpr(<2 x s16>) = COPY $sgpr0
     %1:sgpr(<2 x s16>) = G_FABS %0
@@ -93,9 +93,9 @@ body: |
     liveins: $sgpr0
     ; GCN-LABEL: name: fabs_s16_ss
     ; GCN: liveins: $sgpr0
-    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 32767
-    ; GCN: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0 = S_AND_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
+    ; GCN: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
     ; GCN: $sgpr0 = COPY [[S_AND_B32_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.mir
index 9f891e9..ad270a1 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.mir
@@ -43,7 +43,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_EQ_F32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_EQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_EQ_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_EQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_EQ_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -68,7 +68,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_GT_F32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_GT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_GT_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_GT_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -93,7 +93,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_GE_F32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_GE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_GE_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_GE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_GE_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -118,7 +118,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_LT_F32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_LT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_LT_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_LT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LT_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -143,7 +143,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_LE_F32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_LE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_LE_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_LE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LE_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -168,7 +168,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_LG_F32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_LG_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_LG_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_LG_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LG_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -193,7 +193,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_O_F32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_O_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_O_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_O_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_O_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -218,7 +218,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_U_F32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_U_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_U_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_U_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_U_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -243,7 +243,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NLG_F32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NLG_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NLG_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLG_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLG_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -268,7 +268,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NLE_F32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NLE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NLE_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLE_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -293,7 +293,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NLT_F32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NLT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NLT_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLT_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -318,7 +318,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NGE_F32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NGE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NGE_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NGE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NGE_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -343,7 +343,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NGT_F32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NGT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NGT_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NGT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NGT_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -368,7 +368,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NEQ_F32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NEQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NEQ_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NEQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NEQ_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -441,7 +441,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_EQ_F64_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_EQ_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_EQ_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_EQ_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_EQ_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -466,7 +466,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_GT_F64_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_GT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_GT_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_GT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_GT_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -491,7 +491,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_GE_F64_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_GE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_GE_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_GE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_GE_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -516,7 +516,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_LT_F64_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_LT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_LT_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_LT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LT_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -541,7 +541,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_LE_F64_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_LE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_LE_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_LE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LE_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -566,7 +566,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_LG_F64_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_LG_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_LG_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_LG_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LG_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -591,7 +591,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_O_F64_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_O_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_O_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_O_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_O_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -616,7 +616,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_U_F64_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_U_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_U_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_U_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_U_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -641,7 +641,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_NLG_F64_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NLG_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NLG_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLG_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLG_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -666,7 +666,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_NLE_F64_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NLE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NLE_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLE_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -691,7 +691,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_NLT_F64_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NLT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NLT_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLT_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -716,7 +716,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_NGE_F64_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NGE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NGE_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_NGE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NGE_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -741,7 +741,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_NGT_F64_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NGT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NGT_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_NGT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NGT_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -766,7 +766,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_NEQ_F64_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NEQ_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NEQ_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_NEQ_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NEQ_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.s16.mir
index 99cba93..42b017b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.s16.mir
@@ -49,7 +49,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_EQ_F16_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_EQ_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_EQ_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_EQ_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_EQ_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -76,7 +76,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_GT_F16_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_GT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_GT_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_GT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_GT_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -103,7 +103,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_GE_F16_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_GE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_GE_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_GE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_GE_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -130,7 +130,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_LT_F16_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_LT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_LT_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_LT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LT_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -157,7 +157,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_LE_F16_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_LE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_LE_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_LE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LE_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -183,7 +183,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_LG_F16_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_LG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_LG_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_LG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LG_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -210,7 +210,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_LG_F16_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_LG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_LG_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_LG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LG_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -237,7 +237,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_U_F16_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_U_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_U_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_U_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_U_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -264,7 +264,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NLG_F16_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NLG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NLG_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLG_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -291,7 +291,7 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; WAVE32: [[V_CMP_NLE_F16_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NLE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+ ; WAVE32: [[V_CMP_NLE_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLE_F16_e64_]]
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s32) = COPY $vgpr1
@@ -318,7 +318,7 @@ body: |
; WAVE32: $vcc_hi = IMPLICIT_DEF
; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; WAVE32: [[V_CMP_NLT_F16_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NLT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+ ; WAVE32: [[V_CMP_NLT_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLT_F16_e64_]]
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s32) = COPY $vgpr1
@@ -345,7 +345,7 @@ body: |
; WAVE32: $vcc_hi = IMPLICIT_DEF
; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; WAVE32: [[V_CMP_NGE_F16_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NGE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+ ; WAVE32: [[V_CMP_NGE_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_NGE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NGE_F16_e64_]]
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s32) = COPY $vgpr1
@@ -372,7 +372,7 @@ body: |
; WAVE32: $vcc_hi = IMPLICIT_DEF
; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; WAVE32: [[V_CMP_NGT_F16_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NGT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+ ; WAVE32: [[V_CMP_NGT_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_NGT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NGT_F16_e64_]]
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s32) = COPY $vgpr1
@@ -399,7 +399,7 @@ body: |
; WAVE32: $vcc_hi = IMPLICIT_DEF
; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; WAVE32: [[V_CMP_NEQ_F16_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NEQ_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+ ; WAVE32: [[V_CMP_NEQ_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_NEQ_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NEQ_F16_e64_]]
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s32) = COPY $vgpr1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.mir
index aae30e6..76f16eb 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.mir
@@ -33,7 +33,7 @@ body: |
; CHECK-LABEL: name: ffloor_s32_vs
; CHECK: liveins: $sgpr0
- ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; CHECK: [[V_FLOOR_F32_e64_:%[0-9]+]]:vgpr_32 = V_FLOOR_F32_e64 0, [[COPY]], 0, 0, implicit $exec
; CHECK: $vgpr0 = COPY [[V_FLOOR_F32_e64_]]
%0:sgpr(s32) = COPY $sgpr0
@@ -94,7 +94,7 @@ body: |
; CHECK-LABEL: name: ffloor_fneg_s32_vs
; CHECK: liveins: $sgpr0
- ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; CHECK: [[V_FLOOR_F32_e64_:%[0-9]+]]:vgpr_32 = V_FLOOR_F32_e64 1, [[COPY]], 0, 0, implicit $exec
; CHECK: $vgpr0 = COPY [[V_FLOOR_F32_e64_]]
%0:sgpr(s32) = COPY $sgpr0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s16.mir
index fdb0155..fa462ac 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s16.mir
@@ -15,8 +15,8 @@ body: |
; VI: liveins: $sgpr0
; VI: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
; VI: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32)
- ; VI: [[FFLOOR:%[0-9]+]]:sreg_32_xm0(s16) = G_FFLOOR [[TRUNC]]
- ; VI: [[COPY1:%[0-9]+]]:sreg_32_xm0(s32) = COPY [[FFLOOR]](s16)
+ ; VI: [[FFLOOR:%[0-9]+]]:sreg_32(s16) = G_FFLOOR [[TRUNC]]
+ ; VI: [[COPY1:%[0-9]+]]:sreg_32(s32) = COPY [[FFLOOR]](s16)
; VI: $sgpr0 = COPY [[COPY1]](s32)
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0
@@ -59,7 +59,7 @@ body: |
; VI-LABEL: name: ffloor_s16_vs
; VI: liveins: $sgpr0
- ; VI: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; VI: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; VI: [[V_FLOOR_F16_e64_:%[0-9]+]]:vgpr_32 = V_FLOOR_F16_e64 0, [[COPY]], 0, 0, implicit $exec
; VI: $vgpr0 = COPY [[V_FLOOR_F16_e64_]]
%0:sgpr(s32) = COPY $sgpr0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.mir
index 5ee17f7..1ab91db 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.mir
@@ -14,7 +14,7 @@ body: |
bb.0:
liveins: $sgpr0, $vgpr0, $vgpr1, $vgpr3_vgpr4, $sgpr10_sgpr11, $vgpr10_vgpr11, $vgpr12_vgpr13
; GFX7-LABEL: name: fmaxnum_ieee_f32_f64_ieee_mode_on
- ; GFX7: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX7: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX7: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; GFX7: [[COPY3:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
@@ -82,7 +82,7 @@ body: |
bb.0:
liveins: $sgpr0, $vgpr0, $vgpr1, $vgpr3_vgpr4, $sgpr10_sgpr11, $vgpr10_vgpr11, $vgpr12_vgpr13
; GFX7-LABEL: name: fmaxnum_ieee_f32_f64_ieee_mode_off
- ; GFX7: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX7: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX7: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; GFX7: [[COPY3:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.mir
index 0ad1971..5c886e0 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.mir
@@ -15,7 +15,7 @@ body: |
bb.0:
liveins: $sgpr0, $vgpr0, $vgpr1, $vgpr3_vgpr4, $sgpr10_sgpr11, $vgpr10_vgpr11, $vgpr12_vgpr13
; GFX7-LABEL: name: fmaxnum_f32_f64_ieee_mode_on
- ; GFX7: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX7: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX7: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; GFX7: [[COPY3:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
@@ -81,7 +81,7 @@ body: |
bb.0:
liveins: $sgpr0, $vgpr0, $vgpr1, $vgpr3_vgpr4, $sgpr10_sgpr11, $vgpr10_vgpr11, $vgpr12_vgpr13
; GFX7-LABEL: name: fmaxnum_f32_f64_ieee_mode_off
- ; GFX7: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX7: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX7: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; GFX7: [[COPY3:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.mir
index f0fcc61..eee3130 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.mir
@@ -14,7 +14,7 @@ body: |
bb.0:
liveins: $sgpr0, $vgpr0, $vgpr1, $vgpr3_vgpr4, $sgpr10_sgpr11, $vgpr10_vgpr11, $vgpr12_vgpr13
; GFX7-LABEL: name: fminnum_ieee_f32_f64_ieee_mode_on
- ; GFX7: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX7: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX7: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; GFX7: [[COPY3:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
@@ -82,7 +82,7 @@ body: |
bb.0:
liveins: $sgpr0, $vgpr0, $vgpr1, $vgpr3_vgpr4, $sgpr10_sgpr11, $vgpr10_vgpr11, $vgpr12_vgpr13
; GFX7-LABEL: name: fminnum_ieee_f32_f64_ieee_mode_off
- ; GFX7: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX7: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX7: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; GFX7: [[COPY3:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.mir
index 54d96e8..3d4a09e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.mir
@@ -15,7 +15,7 @@ body: |
bb.0:
liveins: $sgpr0, $vgpr0, $vgpr1, $vgpr3_vgpr4, $sgpr10_sgpr11, $vgpr10_vgpr11, $vgpr12_vgpr13
; GFX7-LABEL: name: fminnum_f32_f64_ieee_mode_on
- ; GFX7: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX7: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX7: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; GFX7: [[COPY3:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
@@ -81,7 +81,7 @@ body: |
bb.0:
liveins: $sgpr0, $vgpr0, $vgpr1, $vgpr3_vgpr4, $sgpr10_sgpr11, $vgpr10_vgpr11, $vgpr12_vgpr13
; GFX7-LABEL: name: fminnum_f32_f64_ieee_mode_off
- ; GFX7: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX7: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX7: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; GFX7: [[COPY3:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir
index aa7e0e7..34d0d5f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir
@@ -11,7 +11,7 @@ body: |
bb.0:
liveins: $sgpr0, $vgpr0, $vgpr1, $vgpr3_vgpr4
; GCN-LABEL: name: fmul_f32
- ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; GCN: [[COPY3:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
@@ -84,7 +84,7 @@ body: |
bb.0:
liveins: $sgpr0, $vgpr0, $vgpr1, $vgpr3_vgpr4
; GCN-LABEL: name: fmul_f16
- ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: [[V_MUL_F16_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $exec
; GCN: [[V_MUL_F16_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $exec
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fneg.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fneg.mir
index 3618e59..4a39f47 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fneg.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fneg.mir
@@ -16,7 +16,7 @@ body: |
; GCN: liveins: $sgpr0
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 2147483648
- ; GCN: [[S_XOR_B32_:%[0-9]+]]:sreg_32_xm0 = S_XOR_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
+ ; GCN: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
; GCN: $sgpr0 = COPY [[S_XOR_B32_]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s32) = G_FNEG %0
@@ -73,9 +73,9 @@ body: |
liveins: $sgpr0
; GCN-LABEL: name: fneg_s16_ss
; GCN: liveins: $sgpr0
- ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 32768
- ; GCN: [[S_XOR_B32_:%[0-9]+]]:sreg_32_xm0 = S_XOR_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
+ ; GCN: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
; GCN: $sgpr0 = COPY [[S_XOR_B32_]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0
@@ -143,7 +143,7 @@ body: |
; GCN: liveins: $sgpr0_sgpr1
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 2147516416
- ; GCN: [[S_XOR_B32_:%[0-9]+]]:sreg_32_xm0 = S_XOR_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
+ ; GCN: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
; GCN: $sgpr0 = COPY [[S_XOR_B32_]]
%0:sgpr(<2 x s16>) = COPY $sgpr0
%1:sgpr(<2 x s16>) = G_FNEG %0
@@ -263,7 +263,7 @@ body: |
; GCN: liveins: $sgpr0_sgpr1
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 2147483648
- ; GCN: [[S_OR_B32_:%[0-9]+]]:sreg_32_xm0 = S_OR_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
+ ; GCN: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
; GCN: $sgpr0 = COPY [[S_OR_B32_]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s32) = G_FABS %0
@@ -325,9 +325,9 @@ body: |
liveins: $sgpr0
; GCN-LABEL: name: fneg_fabs_s16_ss
; GCN: liveins: $sgpr0
- ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 32768
- ; GCN: [[S_OR_B32_:%[0-9]+]]:sreg_32_xm0 = S_OR_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
+ ; GCN: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
; GCN: $sgpr0 = COPY [[S_OR_B32_]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0
@@ -351,7 +351,7 @@ body: |
; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 32768
; GCN: [[V_OR_B32_e32_:%[0-9]+]]:vgpr_32 = V_OR_B32_e32 [[S_MOV_B32_]], [[COPY]], implicit $exec
- ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY [[V_OR_B32_e32_]]
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[V_OR_B32_e32_]]
; GCN: $vgpr0 = COPY [[COPY1]]
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s16) = G_TRUNC %0
@@ -377,7 +377,7 @@ body: |
; GCN: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32)
; GCN: [[FNEG:%[0-9]+]]:sgpr(s16) = G_FNEG [[TRUNC]]
; GCN: [[FNEG1:%[0-9]+]]:vgpr_32(s16) = G_FNEG [[FNEG]]
- ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0(s32) = COPY [[FNEG1]](s16)
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_32(s32) = COPY [[FNEG1]](s16)
; GCN: $vgpr0 = COPY [[COPY1]](s32)
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0
@@ -400,7 +400,7 @@ body: |
; GCN: liveins: $sgpr0_sgpr1
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 2147516416
- ; GCN: [[S_OR_B32_:%[0-9]+]]:sreg_32_xm0 = S_OR_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
+ ; GCN: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
; GCN: $sgpr0 = COPY [[S_OR_B32_]]
%0:sgpr(<2 x s16>) = COPY $sgpr0
%1:sgpr(<2 x s16>) = G_FABS %0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptosi.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptosi.mir
index f4c961a..e1e8c0e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptosi.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptosi.mir
@@ -33,7 +33,7 @@ body: |
; GCN-LABEL: name: fptosi_s32_to_s32_vs
; GCN: liveins: $sgpr0
- ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[V_CVT_I32_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e64 0, [[COPY]], 0, 0, implicit $exec
; GCN: $vgpr0 = COPY [[V_CVT_I32_F32_e64_]]
%0:sgpr(s32) = COPY $sgpr0
@@ -96,7 +96,7 @@ body: |
; GCN-LABEL: name: fptosi_s16_to_s32_vs
; GCN: liveins: $sgpr0
- ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[COPY]], implicit $exec
; GCN: [[V_CVT_I32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $exec
; GCN: $vgpr0 = COPY [[V_CVT_I32_F32_e32_]]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptoui.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptoui.mir
index 8b78ea6..eb4a548 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptoui.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptoui.mir
@@ -12,7 +12,7 @@ body: |
liveins: $sgpr0, $vgpr0, $vgpr3_vgpr4
; GCN-LABEL: name: fptoui
- ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: [[COPY2:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
; GCN: [[V_CVT_U32_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_U32_F32_e64 0, [[COPY]], 0, 0, implicit $exec
@@ -69,7 +69,7 @@ body: |
; GCN-LABEL: name: fptoui_s16_to_s32_vs
; GCN: liveins: $sgpr0
- ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[COPY]], implicit $exec
; GCN: [[V_CVT_U32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_U32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $exec
; GCN: $vgpr0 = COPY [[V_CVT_U32_F32_e32_]]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frame-index.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frame-index.mir
index ea67272..1747fa5 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frame-index.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frame-index.mir
@@ -12,7 +12,7 @@ stack:
body: |
bb.0:
; GCN-LABEL: name: frame_index_s
- ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 %stack.0
+ ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0
; GCN: $sgpr0 = COPY [[S_MOV_B32_]]
%0:sgpr(p5) = G_FRAME_INDEX %stack.0
$sgpr0 = COPY %0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-gep.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-gep.mir
index e03637e..eb685a6 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-gep.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-gep.mir
@@ -319,30 +319,30 @@ body: |
bb.0:
liveins: $sgpr0, $vgpr0
; GFX6-LABEL: name: gep_p3_sgpr_vgpr
- ; GFX6: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX6: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX6: %2:vgpr_32, dead %3:sreg_64_xexec = V_ADD_I32_e64 [[COPY]], [[COPY1]], 0, implicit $exec
; GFX6: S_ENDPGM 0, implicit %2
; GFX8-LABEL: name: gep_p3_sgpr_vgpr
; GFX8: $vcc_hi = IMPLICIT_DEF
- ; GFX8: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX8: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX8: %2:vgpr_32, dead %3:sreg_32_xm0_xexec = V_ADD_I32_e64 [[COPY]], [[COPY1]], 0, implicit $exec
; GFX8: S_ENDPGM 0, implicit %2
; GFX9-LABEL: name: gep_p3_sgpr_vgpr
; GFX9: $vcc_hi = IMPLICIT_DEF
- ; GFX9: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX9: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX9: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY]], [[COPY1]], 0, implicit $exec
; GFX9: S_ENDPGM 0, implicit [[V_ADD_U32_e64_]]
; GFX10-WAVE64-LABEL: name: gep_p3_sgpr_vgpr
- ; GFX10-WAVE64: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX10-WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX10-WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX10-WAVE64: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY]], [[COPY1]], 0, implicit $exec
; GFX10-WAVE64: S_ENDPGM 0, implicit [[V_ADD_U32_e64_]]
; GFX10-WAVE32-LABEL: name: gep_p3_sgpr_vgpr
; GFX10-WAVE32: $vcc_hi = IMPLICIT_DEF
- ; GFX10-WAVE32: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX10-WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX10-WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX10-WAVE32: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY]], [[COPY1]], 0, implicit $exec
; GFX10-WAVE32: S_ENDPGM 0, implicit [[V_ADD_U32_e64_]]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-icmp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-icmp.mir
index d2fe30a..44b1fc8 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-icmp.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-icmp.mir
@@ -17,9 +17,9 @@ regBankSelected: true
# GCN: [[SGPR6:%[0-9]+]]:sreg_32 = COPY $sgpr6
# GCN: [[SGPR7:%[0-9]+]]:sreg_32 = COPY $sgpr7
# GCN: S_CMP_LG_U32 [[SGPR0]], [[SGPR1]], implicit-def $scc
-# GCN-NEXT: [[COND0:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+# GCN-NEXT: [[COND0:%[0-9]+]]:sgpr_32 = COPY $scc
# GCN: S_CMP_LG_U32 [[SGPR4]], [[SGPR5]], implicit-def $scc
-# GCN-NEXT: [[COND1:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+# GCN-NEXT: [[COND1:%[0-9]+]]:sgpr_32 = COPY $scc
# GCN: $scc = COPY [[COND0]]
# GCN-NEXT: S_CSELECT_B32 [[SGPR6]], [[SGPR7]], implicit $scc
# GCN: $scc = COPY [[COND1]]
@@ -244,7 +244,7 @@ regBankSelected: true
# GCN-LABEL: name: icmp_s32_vs
# GCN: [[VGPR2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-# GCN: [[SGPR0:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+# GCN: [[SGPR0:%[0-9]+]]:sreg_32 = COPY $sgpr0
# GCN: V_CMP_NE_U32_e64 [[VGPR2]], [[SGPR0]]
body: |
@@ -268,7 +268,7 @@ regBankSelected: true
# GCN-LABEL: name: icmp_s32_sv
# GCN: [[VGPR2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-# GCN: [[SGPR0:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+# GCN: [[SGPR0:%[0-9]+]]:sreg_32 = COPY $sgpr0
# GCN: V_CMP_NE_U32_e64 [[SGPR0]], [[VGPR2]]
body: |
@@ -292,7 +292,7 @@ regBankSelected: true
# GCN-LABEL: name: icmp_s32_or_vcc
# GCN: [[VGPR2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-# GCN: [[SGPR0:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+# GCN: [[SGPR0:%[0-9]+]]:sreg_32 = COPY $sgpr0
# GCN: V_CMP_NE_U32_e64 [[SGPR0]], [[VGPR2]]
body: |
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir
index fec8155..dc3175e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir
@@ -15,7 +15,7 @@ regBankSelected: true
body: |
bb.0:
; GCN-LABEL: name: implicit_def_s32_sgpr
- ; GCN: [[DEF:%[0-9]+]]:sreg_32_xm0 = IMPLICIT_DEF
+ ; GCN: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
; GCN: S_ENDPGM 0, implicit [[DEF]]
%0:sgpr(s32) = G_IMPLICIT_DEF
S_ENDPGM 0, implicit %0
@@ -169,7 +169,7 @@ regBankSelected: true
body: |
bb.0:
; GCN-LABEL: name: implicit_def_s1_sgpr
- ; GCN: [[DEF:%[0-9]+]]:sreg_32_xm0 = IMPLICIT_DEF
+ ; GCN: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
; GCN: S_ENDPGM 0, implicit [[DEF]]
%0:sgpr(s1) = G_IMPLICIT_DEF
S_ENDPGM 0, implicit %0
@@ -184,7 +184,7 @@ regBankSelected: true
body: |
bb.0:
; GCN-LABEL: name: implicit_def_s1_scc
- ; GCN: [[DEF:%[0-9]+]]:sreg_32_xm0 = IMPLICIT_DEF
+ ; GCN: [[DEF:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
; GCN: S_ENDPGM 0, implicit [[DEF]]
%0:scc(s1) = G_IMPLICIT_DEF
S_ENDPGM 0, implicit %0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-insert.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-insert.mir
index 7e39fa7..ffa909c 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-insert.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-insert.mir
@@ -11,7 +11,7 @@ body: |
bb.0:
; CHECK-LABEL: name: insert_s512_s32
; CHECK: [[DEF:%[0-9]+]]:sreg_512 = IMPLICIT_DEF
- ; CHECK: [[DEF1:%[0-9]+]]:sreg_32_xm0 = IMPLICIT_DEF
+ ; CHECK: [[DEF1:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sreg_512 = INSERT_SUBREG [[DEF]], [[DEF1]], %subreg.sub0
; CHECK: [[INSERT_SUBREG1:%[0-9]+]]:sreg_512 = INSERT_SUBREG [[INSERT_SUBREG]], [[DEF1]], %subreg.sub1
; CHECK: [[INSERT_SUBREG2:%[0-9]+]]:sreg_512 = INSERT_SUBREG [[INSERT_SUBREG1]], [[DEF1]], %subreg.sub2
@@ -97,7 +97,7 @@ body: |
liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-LABEL: name: insert_s_s64_s_s32_0
; CHECK: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
- ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
+ ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sreg_64_xexec = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub0
; CHECK: S_ENDPGM 0, implicit [[INSERT_SUBREG]]
%0:sgpr(s64) = COPY $sgpr0_sgpr1
@@ -117,7 +117,7 @@ body: |
liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-LABEL: name: insert_s_s64_s_s32_32
; CHECK: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
- ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
+ ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sreg_64_xexec = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub1
; CHECK: S_ENDPGM 0, implicit [[INSERT_SUBREG]]
%0:sgpr(s64) = COPY $sgpr0_sgpr1
@@ -157,7 +157,7 @@ body: |
liveins: $vgpr0_vgpr1, $sgpr0
; CHECK-LABEL: name: insert_v_s64_s_s32_32
; CHECK: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
- ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
; CHECK: [[INSERT_SUBREG:%[0-9]+]]:vreg_64 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub1
; CHECK: S_ENDPGM 0, implicit [[INSERT_SUBREG]]
%0:vgpr(s64) = COPY $vgpr0_vgpr1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-constant.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-constant.mir
index e48716c..f0d6139 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-constant.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-constant.mir
@@ -741,7 +741,7 @@ body: |
; GFX6-LABEL: name: load_constant_s32_from_4_gep_1024
; GFX6: liveins: $sgpr0_sgpr1
; GFX6: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
- ; GFX6: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1024
+ ; GFX6: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1024
; GFX6: [[S_LOAD_DWORD_SGPR:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_SGPR [[COPY]], [[S_MOV_B32_]], 0, 0 :: (load 4, addrspace 4)
; GFX6: $sgpr0 = COPY [[S_LOAD_DWORD_SGPR]]
; GFX7-LABEL: name: load_constant_s32_from_4_gep_1024
@@ -782,7 +782,7 @@ body: |
; GFX6-LABEL: name: load_constant_s32_from_4_gep_1048575
; GFX6: liveins: $sgpr0_sgpr1
; GFX6: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
- ; GFX6: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1048575
+ ; GFX6: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1048575
; GFX6: [[S_LOAD_DWORD_SGPR:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_SGPR [[COPY]], [[S_MOV_B32_]], 0, 0 :: (load 4, addrspace 4)
; GFX6: $sgpr0 = COPY [[S_LOAD_DWORD_SGPR]]
; GFX7-LABEL: name: load_constant_s32_from_4_gep_1048575
@@ -823,7 +823,7 @@ body: |
; GFX6-LABEL: name: load_constant_s32_from_4_gep_1048576
; GFX6: liveins: $sgpr0_sgpr1
; GFX6: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
- ; GFX6: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1048576
+ ; GFX6: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1048576
; GFX6: [[S_LOAD_DWORD_SGPR:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_SGPR [[COPY]], [[S_MOV_B32_]], 0, 0 :: (load 4, addrspace 4)
; GFX6: $sgpr0 = COPY [[S_LOAD_DWORD_SGPR]]
; GFX7-LABEL: name: load_constant_s32_from_4_gep_1048576
@@ -834,14 +834,14 @@ body: |
; GFX8-LABEL: name: load_constant_s32_from_4_gep_1048576
; GFX8: liveins: $sgpr0_sgpr1
; GFX8: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
- ; GFX8: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1048576
+ ; GFX8: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1048576
; GFX8: [[S_LOAD_DWORD_SGPR:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_SGPR [[COPY]], [[S_MOV_B32_]], 0, 0 :: (load 4, addrspace 4)
; GFX8: $sgpr0 = COPY [[S_LOAD_DWORD_SGPR]]
; GFX10-LABEL: name: load_constant_s32_from_4_gep_1048576
; GFX10: liveins: $sgpr0_sgpr1
; GFX10: $vcc_hi = IMPLICIT_DEF
; GFX10: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
- ; GFX10: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1048576
+ ; GFX10: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1048576
; GFX10: [[S_LOAD_DWORD_SGPR:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_SGPR [[COPY]], [[S_MOV_B32_]], 0, 0 :: (load 4, addrspace 4)
; GFX10: $sgpr0 = COPY [[S_LOAD_DWORD_SGPR]]
%0:sgpr(p4) = COPY $sgpr0_sgpr1
@@ -866,7 +866,7 @@ body: |
; GFX6-LABEL: name: load_constant_s32_from_4_gep_1073741823
; GFX6: liveins: $sgpr0_sgpr1
; GFX6: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
- ; GFX6: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1073741823
+ ; GFX6: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1073741823
; GFX6: [[S_LOAD_DWORD_SGPR:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_SGPR [[COPY]], [[S_MOV_B32_]], 0, 0 :: (load 4, addrspace 4)
; GFX6: $sgpr0 = COPY [[S_LOAD_DWORD_SGPR]]
; GFX7-LABEL: name: load_constant_s32_from_4_gep_1073741823
@@ -877,14 +877,14 @@ body: |
; GFX8-LABEL: name: load_constant_s32_from_4_gep_1073741823
; GFX8: liveins: $sgpr0_sgpr1
; GFX8: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
- ; GFX8: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1073741823
+ ; GFX8: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1073741823
; GFX8: [[S_LOAD_DWORD_SGPR:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_SGPR [[COPY]], [[S_MOV_B32_]], 0, 0 :: (load 4, addrspace 4)
; GFX8: $sgpr0 = COPY [[S_LOAD_DWORD_SGPR]]
; GFX10-LABEL: name: load_constant_s32_from_4_gep_1073741823
; GFX10: liveins: $sgpr0_sgpr1
; GFX10: $vcc_hi = IMPLICIT_DEF
; GFX10: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
- ; GFX10: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1073741823
+ ; GFX10: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1073741823
; GFX10: [[S_LOAD_DWORD_SGPR:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_SGPR [[COPY]], [[S_MOV_B32_]], 0, 0 :: (load 4, addrspace 4)
; GFX10: $sgpr0 = COPY [[S_LOAD_DWORD_SGPR]]
%0:sgpr(p4) = COPY $sgpr0_sgpr1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir
index 30cb3f0..648e48b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir
@@ -25,25 +25,25 @@ regBankSelected: true
# VI: S_LOAD_DWORD_IMM [[PTR]], 1020, 0, 0
# Immediate overflow for SI
-# SI: [[K1024:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1024
+# SI: [[K1024:%[0-9]+]]:sreg_32 = S_MOV_B32 1024
# SI: S_LOAD_DWORD_SGPR [[PTR]], [[K1024]], 0
# CI: S_LOAD_DWORD_IMM_ci [[PTR]], 256, 0
# VI: S_LOAD_DWORD_IMM [[PTR]], 1024, 0
# Max immediate offset for VI
-# SI: [[K1048572:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1048572
+# SI: [[K1048572:%[0-9]+]]:sreg_32 = S_MOV_B32 1048572
# CI: S_LOAD_DWORD_IMM_ci [[PTR]], 262143
# VI: S_LOAD_DWORD_IMM [[PTR]], 1048572
#
# Immediate overflow for VI
-# SIVI: [[K1048576:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1048576
+# SIVI: [[K1048576:%[0-9]+]]:sreg_32 = S_MOV_B32 1048576
# SIVI: S_LOAD_DWORD_SGPR [[PTR]], [[K1048576]], 0
# CI: S_LOAD_DWORD_IMM_ci [[PTR]], 262144, 0
# Max immediate for CI
-# SIVI: [[K_LO:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 4294967292
-# SIVI: [[K_HI:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 3
+# SIVI: [[K_LO:%[0-9]+]]:sreg_32 = S_MOV_B32 4294967292
+# SIVI: [[K_HI:%[0-9]+]]:sreg_32 = S_MOV_B32 3
# SIVI: [[K:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[K_LO]], %subreg.sub0, [[K_HI]], %subreg.sub1
# SIVI-DAG: [[K_SUB0:%[0-9]+]]:sreg_32 = COPY [[K]].sub0
# SIVI-DAG: [[PTR_LO:%[0-9]+]]:sreg_32 = COPY [[PTR]].sub0
@@ -56,8 +56,8 @@ regBankSelected: true
# CI: S_LOAD_DWORD_IMM_ci [[PTR]], 4294967295, 0, 0
# Immediate overflow for CI
-# GCN: [[K_LO:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 0
-# GCN: [[K_HI:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 4
+# GCN: [[K_LO:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+# GCN: [[K_HI:%[0-9]+]]:sreg_32 = S_MOV_B32 4
# GCN: [[K:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[K_LO]], %subreg.sub0, [[K_HI]], %subreg.sub1
# GCN-DAG: [[K_SUB0:%[0-9]+]]:sreg_32 = COPY [[K]].sub0
# GCN-DAG: [[PTR_LO:%[0-9]+]]:sreg_32 = COPY [[PTR]].sub0
@@ -69,13 +69,13 @@ regBankSelected: true
# GCN: S_LOAD_DWORD_IMM [[ADD_PTR]], 0, 0, 0
# Max 32-bit byte offset
-# SIVI: [[K4294967292:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 4294967292
+# SIVI: [[K4294967292:%[0-9]+]]:sreg_32 = S_MOV_B32 4294967292
# SIVI: S_LOAD_DWORD_SGPR [[PTR]], [[K4294967292]], 0
# CI: S_LOAD_DWORD_IMM_ci [[PTR]], 1073741823, 0
# Overflow 32-bit byte offset
-# SIVI: [[K_LO:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 0
-# SIVI: [[K_HI:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1
+# SIVI: [[K_LO:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+# SIVI: [[K_HI:%[0-9]+]]:sreg_32 = S_MOV_B32 1
# SIVI: [[K:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[K_LO]], %subreg.sub0, [[K_HI]], %subreg.sub1
# SIVI-DAG: [[K_SUB0:%[0-9]+]]:sreg_32 = COPY [[K]].sub0
# SIVI-DAG: [[PTR_LO:%[0-9]+]]:sreg_32 = COPY [[PTR]].sub0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-lshr.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-lshr.mir
index d8dfcce..da8fe27 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-lshr.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-lshr.mir
@@ -54,28 +54,28 @@ body: |
bb.0:
liveins: $sgpr0, $vgpr0
; GFX6-LABEL: name: lshr_s32_sv
- ; GFX6: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX6: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX6: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 [[COPY1]], [[COPY]], implicit $exec
; GFX6: S_ENDPGM 0, implicit [[V_LSHRREV_B32_e64_]]
; GFX7-LABEL: name: lshr_s32_sv
- ; GFX7: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX7: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX7: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 [[COPY1]], [[COPY]], implicit $exec
; GFX7: S_ENDPGM 0, implicit [[V_LSHRREV_B32_e64_]]
; GFX8-LABEL: name: lshr_s32_sv
- ; GFX8: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX8: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX8: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 [[COPY1]], [[COPY]], implicit $exec
; GFX8: S_ENDPGM 0, implicit [[V_LSHRREV_B32_e64_]]
; GFX9-LABEL: name: lshr_s32_sv
- ; GFX9: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX9: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX9: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 [[COPY1]], [[COPY]], implicit $exec
; GFX9: S_ENDPGM 0, implicit [[V_LSHRREV_B32_e64_]]
; GFX10-LABEL: name: lshr_s32_sv
; GFX10: $vcc_hi = IMPLICIT_DEF
- ; GFX10: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX10: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX10: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 [[COPY1]], [[COPY]], implicit $exec
; GFX10: S_ENDPGM 0, implicit [[V_LSHRREV_B32_e64_]]
@@ -95,28 +95,28 @@ body: |
liveins: $sgpr0, $vgpr0
; GFX6-LABEL: name: lshr_s32_vs
; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX6: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX6: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX6: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 [[COPY1]], [[COPY]], implicit $exec
; GFX6: S_ENDPGM 0, implicit [[V_LSHRREV_B32_e64_]]
; GFX7-LABEL: name: lshr_s32_vs
; GFX7: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX7: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX7: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX7: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 [[COPY1]], [[COPY]], implicit $exec
; GFX7: S_ENDPGM 0, implicit [[V_LSHRREV_B32_e64_]]
; GFX8-LABEL: name: lshr_s32_vs
; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX8: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX8: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX8: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 [[COPY1]], [[COPY]], implicit $exec
; GFX8: S_ENDPGM 0, implicit [[V_LSHRREV_B32_e64_]]
; GFX9-LABEL: name: lshr_s32_vs
; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX9: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX9: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX9: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 [[COPY1]], [[COPY]], implicit $exec
; GFX9: S_ENDPGM 0, implicit [[V_LSHRREV_B32_e64_]]
; GFX10-LABEL: name: lshr_s32_vs
; GFX10: $vcc_hi = IMPLICIT_DEF
; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX10: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX10: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX10: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 [[COPY1]], [[COPY]], implicit $exec
; GFX10: S_ENDPGM 0, implicit [[V_LSHRREV_B32_e64_]]
%0:vgpr(s32) = COPY $vgpr0
@@ -255,28 +255,28 @@ body: |
liveins: $sgpr0, $vgpr0_vgpr1
; GFX6-LABEL: name: lshr_s64_vs
; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
- ; GFX6: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX6: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX6: [[V_LSHR_B64_:%[0-9]+]]:vreg_64 = V_LSHR_B64 [[COPY]], [[COPY1]], implicit $exec
; GFX6: S_ENDPGM 0, implicit [[V_LSHR_B64_]]
; GFX7-LABEL: name: lshr_s64_vs
; GFX7: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
- ; GFX7: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX7: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX7: [[V_LSHR_B64_:%[0-9]+]]:vreg_64 = V_LSHR_B64 [[COPY]], [[COPY1]], implicit $exec
; GFX7: S_ENDPGM 0, implicit [[V_LSHR_B64_]]
; GFX8-LABEL: name: lshr_s64_vs
; GFX8: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
- ; GFX8: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX8: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX8: [[V_LSHRREV_B64_:%[0-9]+]]:vreg_64 = V_LSHRREV_B64 [[COPY1]], [[COPY]], implicit $exec
; GFX8: S_ENDPGM 0, implicit [[V_LSHRREV_B64_]]
; GFX9-LABEL: name: lshr_s64_vs
; GFX9: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
- ; GFX9: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX9: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX9: [[V_LSHRREV_B64_:%[0-9]+]]:vreg_64 = V_LSHRREV_B64 [[COPY1]], [[COPY]], implicit $exec
; GFX9: S_ENDPGM 0, implicit [[V_LSHRREV_B64_]]
; GFX10-LABEL: name: lshr_s64_vs
; GFX10: $vcc_hi = IMPLICIT_DEF
; GFX10: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
- ; GFX10: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX10: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX10: [[V_LSHR_B64_:%[0-9]+]]:vreg_64 = V_LSHR_B64 [[COPY]], [[COPY1]], implicit $exec
; GFX10: S_ENDPGM 0, implicit [[V_LSHR_B64_]]
%0:vgpr(s64) = COPY $vgpr0_vgpr1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-lshr.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-lshr.s16.mir
index 95daab6..866ab39 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-lshr.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-lshr.s16.mir
@@ -66,18 +66,18 @@ body: |
liveins: $sgpr0, $vgpr0
; GFX8-LABEL: name: lshr_s16_s16_vs
; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX8: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX8: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX8: [[V_LSHRREV_B16_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B16_e64 [[COPY1]], [[COPY]], implicit $exec
; GFX8: S_ENDPGM 0, implicit [[V_LSHRREV_B16_e64_]]
; GFX9-LABEL: name: lshr_s16_s16_vs
; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX9: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX9: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX9: [[V_LSHRREV_B16_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B16_e64 [[COPY1]], [[COPY]], implicit $exec
; GFX9: S_ENDPGM 0, implicit [[V_LSHRREV_B16_e64_]]
; GFX10-LABEL: name: lshr_s16_s16_vs
; GFX10: $vcc_hi = IMPLICIT_DEF
; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX10: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX10: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX10: [[V_LSHRREV_B16_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B16_e64 [[COPY1]], [[COPY]], implicit $exec
; GFX10: S_ENDPGM 0, implicit [[V_LSHRREV_B16_e64_]]
%0:vgpr(s32) = COPY $vgpr0
@@ -310,18 +310,18 @@ body: |
bb.0:
liveins: $sgpr0, $vgpr0
; GFX8-LABEL: name: lshr_s16_s16_sv
- ; GFX8: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX8: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX8: [[V_LSHRREV_B16_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B16_e64 [[COPY1]], [[COPY]], implicit $exec
; GFX8: S_ENDPGM 0, implicit [[V_LSHRREV_B16_e64_]]
; GFX9-LABEL: name: lshr_s16_s16_sv
- ; GFX9: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX9: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX9: [[V_LSHRREV_B16_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B16_e64 [[COPY1]], [[COPY]], implicit $exec
; GFX9: S_ENDPGM 0, implicit [[V_LSHRREV_B16_e64_]]
; GFX10-LABEL: name: lshr_s16_s16_sv
; GFX10: $vcc_hi = IMPLICIT_DEF
- ; GFX10: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GFX10: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX10: [[V_LSHRREV_B16_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B16_e64 [[COPY1]], [[COPY]], implicit $exec
; GFX10: S_ENDPGM 0, implicit [[V_LSHRREV_B16_e64_]]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-merge-values.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-merge-values.mir
index cb4ebab..d2fa37f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-merge-values.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-merge-values.mir
@@ -42,7 +42,7 @@ body: |
; GCN-LABEL: name: test_merge_values_v_s64_s_s32_v_s32
; GCN: liveins: $sgpr0, $vgpr0
- ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
; GCN: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
@@ -65,7 +65,7 @@ body: |
; GCN-LABEL: name: test_merge_values_v_s64_v_s32_s_s32
; GCN: liveins: $sgpr0, $vgpr0
; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
; GCN: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
%0:vgpr(s32) = COPY $vgpr0
@@ -86,8 +86,8 @@ body: |
; GCN-LABEL: name: test_merge_values_s_s64_s_s32_s_s32
; GCN: liveins: $sgpr0, $sgpr1
- ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
- ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr1
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
; GCN: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
%0:sgpr(s32) = COPY $sgpr0
@@ -108,8 +108,8 @@ body: |
; GCN-LABEL: name: test_merge_values_s_s64_undef_s_s32_s_s32
; GCN: liveins: $sgpr0
- ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
- ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE undef %2:sreg_32_xm0, %subreg.sub0, [[COPY]], %subreg.sub1
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE undef %2:sreg_32, %subreg.sub0, [[COPY]], %subreg.sub1
; GCN: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
%1:sgpr(s32) = COPY $sgpr0
%2:sgpr(s64) = G_MERGE_VALUES undef %0:sgpr(s32), %1
@@ -128,8 +128,8 @@ body: |
; GCN-LABEL: name: test_merge_values_s_s64_s_s32_undef_s_s32
; GCN: liveins: $sgpr0
- ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
- ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[COPY]], %subreg.sub0, undef %2:sreg_32_xm0, %subreg.sub1
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[COPY]], %subreg.sub0, undef %2:sreg_32, %subreg.sub1
; GCN: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
%0:sgpr(s32) = COPY $sgpr0
%2:sgpr(s64) = G_MERGE_VALUES %0, undef %1:sgpr(s32),
@@ -146,9 +146,9 @@ body: |
liveins: $sgpr0, $sgpr1, $sgpr2
; GCN-LABEL: name: test_merge_values_s_s96_s_s32_s_s32_s_s32
- ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
- ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr1
- ; GCN: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2
; GCN: $sgpr0_sgpr1_sgpr2 = COPY [[REG_SEQUENCE]]
%0:sgpr(s32) = COPY $sgpr0
@@ -194,10 +194,10 @@ body: |
; GCN-LABEL: name: test_merge_values_s_s128_s_s32_s_s32_s_s32_s_s32
; GCN: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3
- ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
- ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr1
- ; GCN: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
- ; GCN: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GCN: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
; GCN: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[REG_SEQUENCE]]
%0:sgpr(s32) = COPY $sgpr0
@@ -290,11 +290,11 @@ body: |
; GCN-LABEL: name: test_merge_values_s_s160_s_s32_s_s32_s_s32_s_s32_s_s32
; GCN: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4
- ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
- ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr1
- ; GCN: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
- ; GCN: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
- ; GCN: [[COPY4:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GCN: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GCN: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_160 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3, [[COPY4]], %subreg.sub4
; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4 = COPY [[REG_SEQUENCE]]
%0:sgpr(s32) = COPY $sgpr0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-mul.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-mul.mir
index 2352f2b..1dfbf74 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-mul.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-mul.mir
@@ -29,7 +29,7 @@ body: |
bb.0:
liveins: $sgpr0, $vgpr0
; GCN-LABEL: name: mul_s32_sv
- ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: [[V_MUL_LO_U32_:%[0-9]+]]:vgpr_32 = V_MUL_LO_U32 [[COPY]], [[COPY1]], implicit $exec
; GCN: S_ENDPGM 0, implicit [[V_MUL_LO_U32_]]
@@ -49,7 +49,7 @@ body: |
liveins: $sgpr0, $vgpr0
; GCN-LABEL: name: mul_s32_vs
; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[V_MUL_LO_U32_:%[0-9]+]]:vgpr_32 = V_MUL_LO_U32 [[COPY]], [[COPY1]], implicit $exec
; GCN: S_ENDPGM 0, implicit [[V_MUL_LO_U32_]]
%0:vgpr(s32) = COPY $vgpr0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-or.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-or.mir
index 1423b0a..1ebc090 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-or.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-or.mir
@@ -28,9 +28,9 @@ body: |
; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; WAVE32: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
- ; WAVE32: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_EQ_U32_e64 [[COPY]], [[V_MOV_B32_e32_]], implicit $exec
- ; WAVE32: [[V_CMP_EQ_U32_e64_1:%[0-9]+]]:sreg_32_xm0 = V_CMP_EQ_U32_e64 [[COPY1]], [[V_MOV_B32_e32_]], implicit $exec
- ; WAVE32: [[S_OR_B32_:%[0-9]+]]:sreg_32_xm0 = S_OR_B32 [[V_CMP_EQ_U32_e64_]], [[V_CMP_EQ_U32_e64_1]]
+ ; WAVE32: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32 = V_CMP_EQ_U32_e64 [[COPY]], [[V_MOV_B32_e32_]], implicit $exec
+ ; WAVE32: [[V_CMP_EQ_U32_e64_1:%[0-9]+]]:sreg_32 = V_CMP_EQ_U32_e64 [[COPY1]], [[V_MOV_B32_e32_]], implicit $exec
+ ; WAVE32: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[V_CMP_EQ_U32_e64_]], [[V_CMP_EQ_U32_e64_1]]
; WAVE32: S_ENDPGM 0, implicit [[S_OR_B32_]]
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s32) = COPY $vgpr1
@@ -53,15 +53,15 @@ body: |
liveins: $sgpr0, $sgpr1
; WAVE64-LABEL: name: or_s1_sgpr_sgpr_sgpr
; WAVE64: liveins: $sgpr0, $sgpr1
- ; WAVE64: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
- ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr1
+ ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
; WAVE64: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY]], [[COPY1]]
; WAVE64: S_ENDPGM 0, implicit [[S_OR_B32_]]
; WAVE32-LABEL: name: or_s1_sgpr_sgpr_sgpr
; WAVE32: liveins: $sgpr0, $sgpr1
; WAVE32: $vcc_hi = IMPLICIT_DEF
- ; WAVE32: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
- ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr1
+ ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
; WAVE32: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY]], [[COPY1]]
; WAVE32: S_ENDPGM 0, implicit [[S_OR_B32_]]
%0:sgpr(s32) = COPY $sgpr0
@@ -118,15 +118,15 @@ body: |
liveins: $sgpr0, $sgpr1
; WAVE64-LABEL: name: or_s16_sgpr_sgpr_sgpr
; WAVE64: liveins: $sgpr0, $sgpr1
- ; WAVE64: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
- ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr1
+ ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
; WAVE64: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY]], [[COPY1]]
; WAVE64: S_ENDPGM 0, implicit [[S_OR_B32_]]
; WAVE32-LABEL: name: or_s16_sgpr_sgpr_sgpr
; WAVE32: liveins: $sgpr0, $sgpr1
; WAVE32: $vcc_hi = IMPLICIT_DEF
- ; WAVE32: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
- ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr1
+ ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
; WAVE32: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY]], [[COPY1]]
; WAVE32: S_ENDPGM 0, implicit [[S_OR_B32_]]
%0:sgpr(s32) = COPY $sgpr0
@@ -415,7 +415,7 @@ body: |
; WAVE64: S_ENDPGM 0, implicit [[S_OR_B64_]]
; WAVE32-LABEL: name: or_s1_vcc_undef_vcc_undef_vcc
; WAVE32: $vcc_hi = IMPLICIT_DEF
- ; WAVE32: [[S_OR_B32_:%[0-9]+]]:sreg_32_xm0 = S_OR_B32 undef %1:sreg_32_xm0, undef %2:sreg_32_xm0
+ ; WAVE32: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 undef %1:sreg_32, undef %2:sreg_32
; WAVE32: S_ENDPGM 0, implicit [[S_OR_B32_]]
%2:vcc(s1) = G_OR undef %0:vcc(s1), undef %1:vcc(s1)
S_ENDPGM 0, implicit %2
@@ -483,9 +483,9 @@ body: |
; WAVE32: $vcc_hi = IMPLICIT_DEF
; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; WAVE32: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec
- ; WAVE32: [[V_CMP_NE_U32_e64_1:%[0-9]+]]:sreg_32_xm0 = V_CMP_NE_U32_e64 0, [[COPY1]], implicit $exec
- ; WAVE32: [[S_OR_B32_:%[0-9]+]]:sreg_32_xm0 = S_OR_B32 [[V_CMP_NE_U32_e64_]], [[V_CMP_NE_U32_e64_1]]
+ ; WAVE32: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec
+ ; WAVE32: [[V_CMP_NE_U32_e64_1:%[0-9]+]]:sreg_32 = V_CMP_NE_U32_e64 0, [[COPY1]], implicit $exec
+ ; WAVE32: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[V_CMP_NE_U32_e64_]], [[V_CMP_NE_U32_e64_1]]
; WAVE32: S_ENDPGM 0, implicit [[S_OR_B32_]]
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s32) = COPY $vgpr1
@@ -513,22 +513,22 @@ body: |
; WAVE64-LABEL: name: copy_select_constrain_vcc_result_reg_wave32
; WAVE64: liveins: $vgpr0
; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; WAVE64: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1
+ ; WAVE64: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1
; WAVE64: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]]
- ; WAVE64: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B32_]]
- ; WAVE64: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY1]], [[COPY2]]
- ; WAVE64: [[COPY3:%[0-9]+]]:sreg_64_xexec = COPY [[S_OR_B32_]]
- ; WAVE64: [[COPY4:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY3]]
- ; WAVE64: S_ENDPGM 0, implicit [[COPY4]]
+ ; WAVE64: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY1]], [[S_MOV_B32_]]
+ ; WAVE64: [[COPY2:%[0-9]+]]:sreg_64_xexec = COPY [[S_OR_B32_]]
+ ; WAVE64: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY2]]
+ ; WAVE64: S_ENDPGM 0, implicit [[COPY3]]
; WAVE32-LABEL: name: copy_select_constrain_vcc_result_reg_wave32
; WAVE32: liveins: $vgpr0
; WAVE32: $vcc_hi = IMPLICIT_DEF
; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; WAVE32: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1
- ; WAVE32: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec
- ; WAVE32: [[V_CMP_NE_U32_e64_1:%[0-9]+]]:sreg_32_xm0 = V_CMP_NE_U32_e64 0, [[S_MOV_B32_]], implicit $exec
- ; WAVE32: [[S_OR_B32_:%[0-9]+]]:sreg_32_xm0 = S_OR_B32 [[V_CMP_NE_U32_e64_]], [[V_CMP_NE_U32_e64_1]]
- ; WAVE32: S_ENDPGM 0, implicit [[S_OR_B32_]]
+ ; WAVE32: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1
+ ; WAVE32: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec
+ ; WAVE32: [[V_CMP_NE_U32_e64_1:%[0-9]+]]:sreg_32 = V_CMP_NE_U32_e64 0, [[S_MOV_B32_]], implicit $exec
+ ; WAVE32: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[V_CMP_NE_U32_e64_]], [[V_CMP_NE_U32_e64_1]]
+ ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY [[S_OR_B32_]]
+ ; WAVE32: S_ENDPGM 0, implicit [[COPY1]]
%1:vgpr(s32) = COPY $vgpr0
%0:vgpr(s1) = G_TRUNC %1(s32)
%2:sgpr(s1) = G_CONSTANT i1 true
@@ -555,7 +555,7 @@ body: |
; WAVE64-LABEL: name: copy_select_constrain_vcc_result_reg_wave64
; WAVE64: liveins: $vgpr0
; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; WAVE64: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1
+ ; WAVE64: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1
; WAVE64: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec
; WAVE64: [[V_CMP_NE_U32_e64_1:%[0-9]+]]:sreg_64 = V_CMP_NE_U32_e64 0, [[S_MOV_B32_]], implicit $exec
; WAVE64: [[S_OR_B64_:%[0-9]+]]:sreg_64 = S_OR_B64 [[V_CMP_NE_U32_e64_]], [[V_CMP_NE_U32_e64_1]]
@@ -565,12 +565,12 @@ body: |
; WAVE32: liveins: $vgpr0
; WAVE32: $vcc_hi = IMPLICIT_DEF
; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; WAVE32: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1
- ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]]
- ; WAVE32: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B32_]]
- ; WAVE32: [[S_OR_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_OR_B32 [[COPY1]], [[COPY2]]
- ; WAVE32: [[COPY3:%[0-9]+]]:sreg_64_xexec = COPY [[S_OR_B32_]]
- ; WAVE32: S_ENDPGM 0, implicit [[COPY3]]
+ ; WAVE32: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1
+ ; WAVE32: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec
+ ; WAVE32: [[V_CMP_NE_U32_e64_1:%[0-9]+]]:sreg_32 = V_CMP_NE_U32_e64 0, [[S_MOV_B32_]], implicit $exec
+ ; WAVE32: [[S_OR_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_OR_B32 [[V_CMP_NE_U32_e64_]], [[V_CMP_NE_U32_e64_1]]
+ ; WAVE32: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY [[S_OR_B32_]]
+ ; WAVE32: S_ENDPGM 0, implicit [[COPY1]]
%1:vgpr(s32) = COPY $vgpr0
%0:vgpr(s1) = G_TRUNC %1(s32)
%2:sgpr(s1) = G_CONSTANT i1 true
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-phi.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-phi.mir
index be0820f..1a74d41 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-phi.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-phi.mir
@@ -12,12 +12,12 @@ body: |
; GCN: bb.0:
; GCN: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; GCN: liveins: $sgpr0, $sgpr1, $sgpr2
- ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
- ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr1
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
; GCN: S_CMP_EQ_U32 [[COPY2]], [[S_MOV_B32_]], implicit-def $scc
- ; GCN: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+ ; GCN: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $scc
; GCN: $scc = COPY [[COPY3]]
; GCN: S_CBRANCH_SCC1 %bb.1, implicit $scc
; GCN: S_BRANCH %bb.2
@@ -25,7 +25,7 @@ body: |
; GCN: successors: %bb.2(0x80000000)
; GCN: S_BRANCH %bb.2
; GCN: bb.2:
- ; GCN: [[PHI:%[0-9]+]]:sreg_32_xm0 = PHI [[COPY]], %bb.0, [[COPY1]], %bb.1
+ ; GCN: [[PHI:%[0-9]+]]:sreg_32 = PHI [[COPY]], %bb.0, [[COPY1]], %bb.1
; GCN: $sgpr0 = COPY [[PHI]]
; GCN: S_SETPC_B64 undef $sgpr30_sgpr31
bb.0:
@@ -66,13 +66,13 @@ body: |
; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
; GCN: S_CMP_EQ_U32 [[COPY2]], [[S_MOV_B32_]], implicit-def $scc
- ; GCN: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+ ; GCN: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $scc
; GCN: $scc = COPY [[COPY3]]
; GCN: S_CBRANCH_SCC1 %bb.1, implicit $scc
; GCN: S_BRANCH %bb.2
; GCN: bb.1:
; GCN: successors: %bb.2(0x80000000)
- ; GCN: [[COPY4:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY1]]
+ ; GCN: [[COPY4:%[0-9]+]]:sreg_32 = COPY [[COPY1]]
; GCN: S_BRANCH %bb.2
; GCN: bb.2:
; GCN: [[PHI:%[0-9]+]]:vgpr_32 = PHI [[COPY]], %bb.0, [[COPY4]], %bb.1
@@ -107,6 +107,26 @@ regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
+ ; GCN-LABEL: name: g_phi_s32_sv_sbranch
+ ; GCN: bb.0:
+ ; GCN: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; GCN: liveins: $sgpr0, $vgpr0, $sgpr1, $sgpr2
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GCN: S_CMP_EQ_U32 [[COPY2]], [[S_MOV_B32_]], implicit-def $scc
+ ; GCN: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $scc
+ ; GCN: $scc = COPY [[COPY3]]
+ ; GCN: S_CBRANCH_SCC1 %bb.1, implicit $scc
+ ; GCN: S_BRANCH %bb.2
+ ; GCN: bb.1:
+ ; GCN: successors: %bb.2(0x80000000)
+ ; GCN: S_BRANCH %bb.2
+ ; GCN: bb.2:
+ ; GCN: [[PHI:%[0-9]+]]:vgpr_32 = PHI [[COPY]], %bb.0, [[COPY1]], %bb.1
+ ; GCN: $vgpr0 = COPY [[PHI]]
+ ; GCN: S_SETPC_B64 undef $sgpr30_sgpr31
bb.0:
liveins: $sgpr0, $vgpr0, $sgpr1, $sgpr2
@@ -136,6 +156,27 @@ regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
+ ; GCN-LABEL: name: g_phi_s32_vs_sbranch
+ ; GCN: bb.0:
+ ; GCN: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; GCN: liveins: $sgpr0, $vgpr0, $sgpr1
+ ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GCN: S_CMP_EQ_U32 [[COPY2]], [[S_MOV_B32_]], implicit-def $scc
+ ; GCN: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $scc
+ ; GCN: $scc = COPY [[COPY3]]
+ ; GCN: S_CBRANCH_SCC1 %bb.1, implicit $scc
+ ; GCN: S_BRANCH %bb.2
+ ; GCN: bb.1:
+ ; GCN: successors: %bb.2(0x80000000)
+ ; GCN: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
+ ; GCN: S_BRANCH %bb.2
+ ; GCN: bb.2:
+ ; GCN: [[PHI:%[0-9]+]]:vgpr_32 = PHI [[COPY]], %bb.0, [[COPY4]], %bb.1
+ ; GCN: $vgpr0 = COPY [[PHI]]
+ ; GCN: S_SETPC_B64 undef $sgpr30_sgpr31
bb.0:
liveins: $sgpr0, $vgpr0, $sgpr1
@@ -174,7 +215,7 @@ body: |
; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
; GCN: S_CMP_EQ_U32 [[COPY2]], [[S_MOV_B32_]], implicit-def $scc
- ; GCN: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+ ; GCN: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $scc
; GCN: $scc = COPY [[COPY3]]
; GCN: S_CBRANCH_SCC1 %bb.1, implicit $scc
; GCN: S_BRANCH %bb.2
@@ -222,13 +263,13 @@ body: |
; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
; GCN: S_CMP_EQ_U32 [[COPY2]], [[S_MOV_B32_]], implicit-def $scc
- ; GCN: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+ ; GCN: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $scc
; GCN: $scc = COPY [[COPY3]]
; GCN: S_CBRANCH_SCC1 %bb.1, implicit $scc
; GCN: S_BRANCH %bb.2
; GCN: bb.1:
; GCN: successors: %bb.2(0x80000000)
- ; GCN: [[COPY4:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY1]]
+ ; GCN: [[COPY4:%[0-9]+]]:sreg_32 = COPY [[COPY1]]
; GCN: S_BRANCH %bb.2
; GCN: bb.2:
; GCN: [[PHI:%[0-9]+]]:vgpr_32 = PHI [[COPY]], %bb.0, [[COPY4]], %bb.1
@@ -263,6 +304,27 @@ regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
+ ; GCN-LABEL: name: g_phi_vcc_s1_sbranch
+ ; GCN: bb.0:
+ ; GCN: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; GCN: liveins: $vgpr0, $vgpr1, $sgpr2
+ ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GCN: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_U32_e64 [[COPY]], [[S_MOV_B32_]], implicit $exec
+ ; GCN: S_CMP_EQ_U32 [[COPY2]], [[S_MOV_B32_]], implicit-def $scc
+ ; GCN: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $scc
+ ; GCN: $scc = COPY [[COPY3]]
+ ; GCN: S_CBRANCH_SCC1 %bb.1, implicit $scc
+ ; GCN: S_BRANCH %bb.2
+ ; GCN: bb.1:
+ ; GCN: successors: %bb.2(0x80000000)
+ ; GCN: [[V_CMP_EQ_U32_e64_1:%[0-9]+]]:sreg_64 = V_CMP_EQ_U32_e64 [[COPY1]], [[S_MOV_B32_]], implicit $exec
+ ; GCN: S_BRANCH %bb.2
+ ; GCN: bb.2:
+ ; GCN: [[PHI:%[0-9]+]]:sreg_64_xexec = PHI [[V_CMP_EQ_U32_e64_]], %bb.0, [[V_CMP_EQ_U32_e64_1]], %bb.1
+ ; GCN: S_SETPC_B64 undef $sgpr30_sgpr31, implicit [[PHI]]
bb.0:
liveins: $vgpr0, $vgpr1, $sgpr2
@@ -296,12 +358,12 @@ body: |
; GCN: bb.0:
; GCN: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; GCN: liveins: $sgpr0, $sgpr1, $sgpr2
- ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
- ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr1
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
; GCN: S_CMP_EQ_U32 [[COPY2]], [[S_MOV_B32_]], implicit-def $scc
- ; GCN: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+ ; GCN: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $scc
; GCN: $scc = COPY [[COPY3]]
; GCN: S_CBRANCH_SCC1 %bb.1, implicit $scc
; GCN: S_BRANCH %bb.2
@@ -309,7 +371,7 @@ body: |
; GCN: successors: %bb.2(0x80000000)
; GCN: S_BRANCH %bb.2
; GCN: bb.2:
- ; GCN: [[PHI:%[0-9]+]]:sreg_32_xm0 = PHI [[COPY]], %bb.0, [[COPY1]], %bb.1
+ ; GCN: [[PHI:%[0-9]+]]:sreg_32 = PHI [[COPY]], %bb.0, [[COPY1]], %bb.1
; GCN: $sgpr0 = COPY [[PHI]]
; GCN: S_SETPC_B64 undef $sgpr30_sgpr31
bb.0:
@@ -350,13 +412,13 @@ body: |
; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
; GCN: S_CMP_EQ_U32 [[COPY2]], [[S_MOV_B32_]], implicit-def $scc
- ; GCN: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+ ; GCN: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $scc
; GCN: $scc = COPY [[COPY3]]
; GCN: S_CBRANCH_SCC1 %bb.1, implicit $scc
; GCN: S_BRANCH %bb.2
; GCN: bb.1:
; GCN: successors: %bb.2(0x80000000)
- ; GCN: [[COPY4:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY1]]
+ ; GCN: [[COPY4:%[0-9]+]]:sreg_32 = COPY [[COPY1]]
; GCN: S_BRANCH %bb.2
; GCN: bb.2:
; GCN: [[PHI:%[0-9]+]]:vgpr_32 = PHI [[COPY]], %bb.0, [[COPY4]], %bb.1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ptr-mask.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ptr-mask.mir
index f91f4fd..9a234e4 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ptr-mask.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ptr-mask.mir
@@ -11,9 +11,9 @@ body: |
liveins: $sgpr0
; CHECK-LABEL: name: ptr_mask_p3_sgpr_sgpr_1
- ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 -2
- ; CHECK: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0 = S_AND_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
+ ; CHECK: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
; CHECK: S_ENDPGM 0, implicit [[S_AND_B32_]]
%0:sgpr(p3) = COPY $sgpr0
%1:sgpr(p3) = G_PTR_MASK %0, 1
@@ -31,9 +31,9 @@ body: |
liveins: $sgpr0
; CHECK-LABEL: name: ptr_mask_p3_sgpr_sgpr_2
- ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 -4
- ; CHECK: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0 = S_AND_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
+ ; CHECK: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
; CHECK: S_ENDPGM 0, implicit [[S_AND_B32_]]
%0:sgpr(p3) = COPY $sgpr0
%1:sgpr(p3) = G_PTR_MASK %0, 2
@@ -51,9 +51,9 @@ body: |
liveins: $sgpr0
; CHECK-LABEL: name: ptr_mask_p3_sgpr_sgpr_3
- ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 -8
- ; CHECK: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0 = S_AND_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
+ ; CHECK: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
; CHECK: S_ENDPGM 0, implicit [[S_AND_B32_]]
%0:sgpr(p3) = COPY $sgpr0
%1:sgpr(p3) = G_PTR_MASK %0, 3
@@ -71,9 +71,9 @@ body: |
liveins: $sgpr0
; CHECK-LABEL: name: ptr_mask_p3_sgpr_sgpr_4
- ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 -16
- ; CHECK: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0 = S_AND_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
+ ; CHECK: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
; CHECK: S_ENDPGM 0, implicit [[S_AND_B32_]]
%0:sgpr(p3) = COPY $sgpr0
%1:sgpr(p3) = G_PTR_MASK %0, 4
@@ -91,9 +91,9 @@ body: |
liveins: $sgpr0
; CHECK-LABEL: name: ptr_mask_p3_sgpr_sgpr_29
- ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 -16
- ; CHECK: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0 = S_AND_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
+ ; CHECK: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc
; CHECK: S_ENDPGM 0, implicit [[S_AND_B32_]]
%0:sgpr(p3) = COPY $sgpr0
%1:sgpr(p3) = G_PTR_MASK %0, 4
@@ -441,7 +441,7 @@ body: |
liveins: $sgpr0
; CHECK-LABEL: name: ptr_mask_p3_vgpr_sgpr_2
- ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 -4, implicit $exec
; CHECK: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 [[COPY]], [[V_MOV_B32_e32_]], implicit $exec
; CHECK: S_ENDPGM 0, implicit [[V_AND_B32_e64_]]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ptrtoint.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ptrtoint.mir
index 53bd5e1..8777db5 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ptrtoint.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ptrtoint.mir
@@ -13,7 +13,7 @@ body: |
; CHECK-LABEL: name: ptrtoint_s_p3_to_s_s32
; CHECK: liveins: $sgpr0
- ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; CHECK: S_ENDPGM 0, implicit [[COPY]]
%0:sgpr(p3) = COPY $sgpr0
%1:sgpr(s32) = G_PTRTOINT %0
@@ -33,7 +33,7 @@ body: |
; CHECK-LABEL: name: ptrtoint_s_p5_to_s_s32
; CHECK: liveins: $sgpr0
- ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; CHECK: S_ENDPGM 0, implicit [[COPY]]
%0:sgpr(p5) = COPY $sgpr0
%1:sgpr(s32) = G_PTRTOINT %0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-select.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-select.mir
index 0462de8..8998cb2 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-select.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-select.mir
@@ -16,7 +16,7 @@ body: |
; GCN: [[COPY2:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
; GCN: [[COPY3:%[0-9]+]]:sreg_64 = COPY $sgpr4_sgpr5
; GCN: S_CMP_EQ_U32 [[COPY]], [[COPY1]], implicit-def $scc
- ; GCN: [[COPY4:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+ ; GCN: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $scc
; GCN: $scc = COPY [[COPY4]]
; GCN: [[S_CSELECT_B64_:%[0-9]+]]:sreg_64 = S_CSELECT_B64 [[COPY2]], [[COPY3]], implicit $scc
; GCN: S_ENDPGM 0, implicit [[S_CSELECT_B64_]]
@@ -45,7 +45,7 @@ body: |
; GCN: [[COPY2:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
; GCN: [[COPY3:%[0-9]+]]:sreg_64 = COPY $sgpr4_sgpr5
; GCN: S_CMP_EQ_U32 [[COPY]], [[COPY1]], implicit-def $scc
- ; GCN: [[COPY4:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+ ; GCN: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $scc
; GCN: $scc = COPY [[COPY4]]
; GCN: [[S_CSELECT_B64_:%[0-9]+]]:sreg_64 = S_CSELECT_B64 [[COPY2]], [[COPY3]], implicit $scc
; GCN: S_ENDPGM 0, implicit [[S_CSELECT_B64_]]
@@ -69,12 +69,12 @@ body: |
liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3
; GCN-LABEL: name: select_s16_scc
- ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
- ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr1
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
; GCN: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
; GCN: S_CMP_EQ_U32 [[COPY2]], [[COPY3]], implicit-def $scc
- ; GCN: [[COPY4:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+ ; GCN: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $scc
; GCN: $scc = COPY [[COPY4]]
; GCN: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 [[COPY]], [[COPY1]], implicit $scc
; GCN: S_ENDPGM 0, implicit [[S_CSELECT_B32_]]
@@ -105,7 +105,7 @@ body: |
; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
; GCN: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
; GCN: S_CMP_EQ_U32 [[COPY]], [[COPY1]], implicit-def $scc
- ; GCN: [[COPY4:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+ ; GCN: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $scc
; GCN: $scc = COPY [[COPY4]]
; GCN: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 [[COPY2]], [[COPY3]], implicit $scc
; GCN: S_ENDPGM 0, implicit [[S_CSELECT_B32_]]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sext.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sext.mir
index ac7da5f..59a75f0 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sext.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sext.mir
@@ -15,7 +15,7 @@ body: |
; GCN: S_CMP_EQ_U32 [[COPY]], [[COPY]], implicit-def $scc
; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $scc
; GCN: $scc = COPY [[COPY1]]
- ; GCN: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32_xm0 = S_CSELECT_B32 0, -1, implicit $scc
+ ; GCN: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 0, -1, implicit $scc
; GCN: $sgpr0 = COPY [[S_CSELECT_B32_]]
%0:sgpr(s32) = COPY $sgpr0
%1:scc(s1) = G_ICMP intpred(eq), %0, %0
@@ -55,8 +55,8 @@ body: |
liveins: $sgpr0
; GCN-LABEL: name: sext_sgpr_s1_to_sgpr_s32
- ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
- ; GCN: [[S_BFE_I32_:%[0-9]+]]:sreg_32_xm0 = S_BFE_I32 [[COPY]], 65536, implicit-def $scc
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GCN: [[S_BFE_I32_:%[0-9]+]]:sreg_32 = S_BFE_I32 [[COPY]], 65536, implicit-def $scc
; GCN: $sgpr0 = COPY [[S_BFE_I32_]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s1) = G_TRUNC %0
@@ -74,7 +74,7 @@ body: |
liveins: $sgpr0
; GCN-LABEL: name: sext_sgpr_s1_to_sgpr_s64
- ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[DEF]], %subreg.sub1
; GCN: [[S_BFE_I64_:%[0-9]+]]:sreg_64_xexec = S_BFE_I64 [[REG_SEQUENCE]], 65536, implicit-def $scc
@@ -95,8 +95,8 @@ body: |
liveins: $sgpr0
; GCN-LABEL: name: sext_sgpr_s8_to_sgpr_s32
- ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
- ; GCN: [[S_SEXT_I32_I8_:%[0-9]+]]:sreg_32_xm0 = S_SEXT_I32_I8 [[COPY]]
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GCN: [[S_SEXT_I32_I8_:%[0-9]+]]:sreg_32 = S_SEXT_I32_I8 [[COPY]]
; GCN: $sgpr0 = COPY [[S_SEXT_I32_I8_]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s8) = G_TRUNC %0
@@ -114,8 +114,8 @@ body: |
liveins: $sgpr0
; GCN-LABEL: name: sext_sgpr_s16_to_sgpr_s32
- ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
- ; GCN: [[S_SEXT_I32_I16_:%[0-9]+]]:sreg_32_xm0 = S_SEXT_I32_I16 [[COPY]]
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GCN: [[S_SEXT_I32_I16_:%[0-9]+]]:sreg_32 = S_SEXT_I32_I16
[[COPY]] ; GCN: $sgpr0 = COPY [[S_SEXT_I32_I16_]] %0:sgpr(s32) = COPY $sgpr0 %1:sgpr(s16) = G_TRUNC %0 @@ -134,7 +134,7 @@ body: | liveins: $sgpr0 ; GCN-LABEL: name: sext_sgpr_s16_to_sgpr_s64 - ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[DEF]], %subreg.sub1 ; GCN: [[S_BFE_I64_:%[0-9]+]]:sreg_64_xexec = S_BFE_I64 [[REG_SEQUENCE]], 1048576, implicit-def $scc diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-shl.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-shl.mir index 28df252..491f2ef 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-shl.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-shl.mir @@ -54,28 +54,28 @@ body: | bb.0: liveins: $sgpr0, $vgpr0 ; GFX6-LABEL: name: shl_s32_sv - ; GFX6: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX6: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GFX6: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 [[COPY1]], [[COPY]], implicit $exec ; GFX6: S_ENDPGM 0, implicit [[V_LSHLREV_B32_e64_]] ; GFX7-LABEL: name: shl_s32_sv - ; GFX7: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX7: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GFX7: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 [[COPY1]], [[COPY]], implicit $exec ; GFX7: S_ENDPGM 0, implicit [[V_LSHLREV_B32_e64_]] ; GFX8-LABEL: name: shl_s32_sv - ; GFX8: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX8: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GFX8: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 [[COPY1]], [[COPY]], implicit $exec ; GFX8: S_ENDPGM 0, implicit [[V_LSHLREV_B32_e64_]] ; GFX9-LABEL: name: shl_s32_sv - ; GFX9: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX9: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GFX9: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 [[COPY1]], [[COPY]], implicit $exec ; GFX9: S_ENDPGM 0, implicit [[V_LSHLREV_B32_e64_]] ; GFX10-LABEL: name: shl_s32_sv ; GFX10: $vcc_hi = IMPLICIT_DEF - ; GFX10: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX10: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GFX10: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 [[COPY1]], [[COPY]], implicit $exec ; GFX10: S_ENDPGM 0, implicit [[V_LSHLREV_B32_e64_]] @@ -95,28 +95,28 @@ body: | liveins: $sgpr0, $vgpr0 ; GFX6-LABEL: name: shl_s32_vs ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX6: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX6: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX6: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 [[COPY1]], [[COPY]], implicit $exec ; GFX6: S_ENDPGM 0, implicit [[V_LSHLREV_B32_e64_]] ; GFX7-LABEL: name: shl_s32_vs ; GFX7: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX7: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX7: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX7: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 [[COPY1]], [[COPY]], implicit $exec ; GFX7: S_ENDPGM 0, implicit [[V_LSHLREV_B32_e64_]] ; GFX8-LABEL: name: shl_s32_vs ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX8: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX8: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX8: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = 
V_LSHLREV_B32_e64 [[COPY1]], [[COPY]], implicit $exec ; GFX8: S_ENDPGM 0, implicit [[V_LSHLREV_B32_e64_]] ; GFX9-LABEL: name: shl_s32_vs ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX9: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX9: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX9: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 [[COPY1]], [[COPY]], implicit $exec ; GFX9: S_ENDPGM 0, implicit [[V_LSHLREV_B32_e64_]] ; GFX10-LABEL: name: shl_s32_vs ; GFX10: $vcc_hi = IMPLICIT_DEF ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX10: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX10: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX10: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 [[COPY1]], [[COPY]], implicit $exec ; GFX10: S_ENDPGM 0, implicit [[V_LSHLREV_B32_e64_]] %0:vgpr(s32) = COPY $vgpr0 @@ -255,28 +255,28 @@ body: | liveins: $sgpr0, $vgpr0_vgpr1 ; GFX6-LABEL: name: shl_s64_vs ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 - ; GFX6: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX6: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX6: [[V_LSHL_B64_:%[0-9]+]]:vreg_64 = V_LSHL_B64 [[COPY]], [[COPY1]], implicit $exec ; GFX6: S_ENDPGM 0, implicit [[V_LSHL_B64_]] ; GFX7-LABEL: name: shl_s64_vs ; GFX7: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 - ; GFX7: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX7: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX7: [[V_LSHL_B64_:%[0-9]+]]:vreg_64 = V_LSHL_B64 [[COPY]], [[COPY1]], implicit $exec ; GFX7: S_ENDPGM 0, implicit [[V_LSHL_B64_]] ; GFX8-LABEL: name: shl_s64_vs ; GFX8: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 - ; GFX8: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX8: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX8: [[V_LSHLREV_B64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64 [[COPY1]], [[COPY]], implicit $exec ; GFX8: S_ENDPGM 0, implicit [[V_LSHLREV_B64_]] ; GFX9-LABEL: name: shl_s64_vs ; GFX9: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 - ; GFX9: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX9: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX9: [[V_LSHLREV_B64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64 [[COPY1]], [[COPY]], implicit $exec ; GFX9: S_ENDPGM 0, implicit [[V_LSHLREV_B64_]] ; GFX10-LABEL: name: shl_s64_vs ; GFX10: $vcc_hi = IMPLICIT_DEF ; GFX10: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 - ; GFX10: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX10: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX10: [[V_LSHL_B64_:%[0-9]+]]:vreg_64 = V_LSHL_B64 [[COPY]], [[COPY1]], implicit $exec ; GFX10: S_ENDPGM 0, implicit [[V_LSHL_B64_]] %0:vgpr(s64) = COPY $vgpr0_vgpr1 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-shl.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-shl.s16.mir index 4ba4924..4321e4e 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-shl.s16.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-shl.s16.mir @@ -66,18 +66,18 @@ body: | liveins: $sgpr0, $vgpr0 ; GFX8-LABEL: name: shl_s16_s16_vs ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX8: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX8: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX8: [[V_LSHLREV_B16_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B16_e64 [[COPY1]], [[COPY]], implicit $exec ; GFX8: S_ENDPGM 0, implicit [[V_LSHLREV_B16_e64_]] ; GFX9-LABEL: name: shl_s16_s16_vs ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX9: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX9: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX9: [[V_LSHLREV_B16_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B16_e64 
[[COPY1]], [[COPY]], implicit $exec ; GFX9: S_ENDPGM 0, implicit [[V_LSHLREV_B16_e64_]] ; GFX10-LABEL: name: shl_s16_s16_vs ; GFX10: $vcc_hi = IMPLICIT_DEF ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX10: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX10: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX10: [[V_LSHLREV_B16_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B16_e64 [[COPY1]], [[COPY]], implicit $exec ; GFX10: S_ENDPGM 0, implicit [[V_LSHLREV_B16_e64_]] %0:vgpr(s32) = COPY $vgpr0 @@ -310,18 +310,18 @@ body: | bb.0: liveins: $sgpr0, $vgpr0 ; GFX8-LABEL: name: shl_s16_s16_sv - ; GFX8: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX8: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GFX8: [[V_LSHLREV_B16_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B16_e64 [[COPY1]], [[COPY]], implicit $exec ; GFX8: S_ENDPGM 0, implicit [[V_LSHLREV_B16_e64_]] ; GFX9-LABEL: name: shl_s16_s16_sv - ; GFX9: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX9: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GFX9: [[V_LSHLREV_B16_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B16_e64 [[COPY1]], [[COPY]], implicit $exec ; GFX9: S_ENDPGM 0, implicit [[V_LSHLREV_B16_e64_]] ; GFX10-LABEL: name: shl_s16_s16_sv ; GFX10: $vcc_hi = IMPLICIT_DEF - ; GFX10: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX10: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GFX10: [[V_LSHLREV_B16_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B16_e64 [[COPY1]], [[COPY]], implicit $exec ; GFX10: S_ENDPGM 0, implicit [[V_LSHLREV_B16_e64_]] diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir index 63d6427..3f7917d 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir @@ -13,7 +13,7 @@ body: | liveins: $sgpr0, $vgpr0, $vgpr3_vgpr4 ; WAVE64-LABEL: name: sitofp - ; WAVE64: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; WAVE64: [[COPY2:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4 ; WAVE64: [[V_CVT_F32_I32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e64 [[COPY]], 0, 0, implicit $exec @@ -22,7 +22,7 @@ body: | ; WAVE64: FLAT_STORE_DWORD [[COPY2]], [[V_CVT_F32_I32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1) ; WAVE32-LABEL: name: sitofp ; WAVE32: $vcc_hi = IMPLICIT_DEF - ; WAVE32: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; WAVE32: [[COPY2:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4 ; WAVE32: [[V_CVT_F32_I32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e64 [[COPY]], 0, 0, implicit $exec @@ -86,14 +86,14 @@ body: | ; WAVE64-LABEL: name: sitofp_s32_to_s16_vs ; WAVE64: liveins: $sgpr0 - ; WAVE64: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; WAVE64: [[V_CVT_F32_I32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e32 [[COPY]], implicit $exec ; WAVE64: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $exec ; WAVE64: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]] ; WAVE32-LABEL: name: sitofp_s32_to_s16_vs ; WAVE32: liveins: $sgpr0 ; WAVE32: $vcc_hi = IMPLICIT_DEF - ; WAVE32: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; WAVE32: [[V_CVT_F32_I32_e32_:%[0-9]+]]:vgpr_32 
= V_CVT_F32_I32_e32 [[COPY]], implicit $exec ; WAVE32: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $exec ; WAVE32: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]] @@ -120,7 +120,7 @@ body: | ; WAVE64: S_CMP_EQ_U32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32 = COPY $scc ; WAVE64: $scc = COPY [[COPY1]] - ; WAVE64: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32_xm0 = S_CSELECT_B32 0, 3212836864, implicit $scc + ; WAVE64: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 0, 3212836864, implicit $scc ; WAVE64: $sgpr0 = COPY [[S_CSELECT_B32_]] ; WAVE32-LABEL: name: sitofp_s1_to_s32_s_scc ; WAVE32: liveins: $sgpr0 @@ -130,7 +130,7 @@ body: | ; WAVE32: S_CMP_EQ_U32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32 = COPY $scc ; WAVE32: $scc = COPY [[COPY1]] - ; WAVE32: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32_xm0 = S_CSELECT_B32 0, 3212836864, implicit $scc + ; WAVE32: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 0, 3212836864, implicit $scc ; WAVE32: $sgpr0 = COPY [[S_CSELECT_B32_]] %0:sgpr(s32) = COPY $sgpr0 %1:sgpr(s32) = G_CONSTANT i32 0 @@ -156,7 +156,7 @@ body: | ; WAVE64: S_CMP_EQ_U32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32 = COPY $scc ; WAVE64: $scc = COPY [[COPY1]] - ; WAVE64: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32_xm0 = S_CSELECT_B32 0, 48128, implicit $scc + ; WAVE64: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 0, 48128, implicit $scc ; WAVE64: $sgpr0 = COPY [[S_CSELECT_B32_]] ; WAVE32-LABEL: name: sitofp_s1_to_s16_to_s_scc ; WAVE32: liveins: $sgpr0 @@ -166,7 +166,7 @@ body: | ; WAVE32: S_CMP_EQ_U32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32 = COPY $scc ; WAVE32: $scc = COPY [[COPY1]] - ; WAVE32: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32_xm0 = S_CSELECT_B32 0, 48128, implicit $scc + ; WAVE32: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 0, 48128, implicit $scc ; WAVE32: $sgpr0 = COPY [[S_CSELECT_B32_]] %0:sgpr(s32) = COPY $sgpr0 %1:sgpr(s32) = G_CONSTANT i32 0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-smax.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-smax.mir index 1ffe7d3..ae6a6e7 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-smax.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-smax.mir @@ -29,7 +29,7 @@ body: | bb.0: liveins: $sgpr0, $vgpr0 ; GCN-LABEL: name: smax_s32_sv - ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GCN: [[V_MAX_I32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_I32_e64 [[COPY]], [[COPY1]], implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_MAX_I32_e64_]] @@ -49,7 +49,7 @@ body: | liveins: $sgpr0, $vgpr0 ; GCN-LABEL: name: smax_s32_vs ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[V_MAX_I32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_I32_e64 [[COPY]], [[COPY1]], implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_MAX_I32_e64_]] %0:vgpr(s32) = COPY $vgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-smin.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-smin.mir index 3c37a94..ed120f9 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-smin.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-smin.mir @@ -29,7 +29,7 @@ body: | bb.0: liveins: $sgpr0, $vgpr0 ; GCN-LABEL: name: smin_s32_sv - ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; 
GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GCN: [[V_MIN_I32_e64_:%[0-9]+]]:vgpr_32 = V_MIN_I32_e64 [[COPY]], [[COPY1]], implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_MIN_I32_e64_]] @@ -49,7 +49,7 @@ body: | liveins: $sgpr0, $vgpr0 ; GCN-LABEL: name: smin_s32_vs ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[V_MIN_I32_e64_:%[0-9]+]]:vgpr_32 = V_MIN_I32_e64 [[COPY]], [[COPY1]], implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_MIN_I32_e64_]] %0:vgpr(s32) = COPY $vgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-smulh.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-smulh.mir index 4d41268..ea6fe37 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-smulh.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-smulh.mir @@ -36,7 +36,7 @@ body: | bb.0: liveins: $sgpr0, $vgpr0 ; GCN-LABEL: name: smulh_s32_sv - ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GCN: [[V_MUL_HI_I32_:%[0-9]+]]:vgpr_32 = V_MUL_HI_I32 [[COPY]], [[COPY1]], implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_MUL_HI_I32_]] @@ -56,7 +56,7 @@ body: | liveins: $sgpr0, $vgpr0 ; GCN-LABEL: name: smulh_s32_vs ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: [[V_MUL_HI_I32_:%[0-9]+]]:vgpr_32 = V_MUL_HI_I32 [[COPY]], [[COPY1]], implicit $exec ; GCN: S_ENDPGM 0, implicit [[V_MUL_HI_I32_]] %0:vgpr(s32) = COPY $vgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sub.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sub.mir index 07e6e47..0d301d1 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sub.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sub.mir @@ -20,7 +20,7 @@ body: | ; GFX6: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX6: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX6: [[S_SUB_U32_:%[0-9]+]]:sreg_32_xm0 = S_SUB_U32 [[COPY]], [[COPY1]], implicit-def $scc + ; GFX6: [[S_SUB_U32_:%[0-9]+]]:sreg_32 = S_SUB_U32 [[COPY]], [[COPY1]], implicit-def $scc ; GFX6: %7:vgpr_32, dead %12:sreg_64_xexec = V_SUB_I32_e64 [[COPY2]], [[S_SUB_U32_]], 0, implicit $exec ; GFX6: %8:vgpr_32, dead %11:sreg_64_xexec = V_SUB_I32_e64 [[S_SUB_U32_]], %7, 0, implicit $exec ; GFX6: %9:vgpr_32, dead %10:sreg_64_xexec = V_SUB_I32_e64 %8, [[COPY2]], 0, implicit $exec @@ -30,7 +30,7 @@ body: | ; GFX9: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX9: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 ; GFX9: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX9: [[S_SUB_U32_:%[0-9]+]]:sreg_32_xm0 = S_SUB_U32 [[COPY]], [[COPY1]], implicit-def $scc + ; GFX9: [[S_SUB_U32_:%[0-9]+]]:sreg_32 = S_SUB_U32 [[COPY]], [[COPY1]], implicit-def $scc ; GFX9: [[V_SUB_U32_e64_:%[0-9]+]]:vgpr_32 = V_SUB_U32_e64 [[COPY2]], [[S_SUB_U32_]], 0, implicit $exec ; GFX9: [[V_SUB_U32_e64_1:%[0-9]+]]:vgpr_32 = V_SUB_U32_e64 [[S_SUB_U32_]], [[V_SUB_U32_e64_]], 0, implicit $exec ; GFX9: [[V_SUB_U32_e64_2:%[0-9]+]]:vgpr_32 = V_SUB_U32_e64 [[V_SUB_U32_e64_1]], [[COPY2]], 0, implicit $exec @@ -41,7 +41,7 @@ body: | ; GFX10: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX10: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 ; GFX10: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX10: [[S_SUB_U32_:%[0-9]+]]:sreg_32_xm0 = S_SUB_U32 [[COPY]], [[COPY1]], 
implicit-def $scc + ; GFX10: [[S_SUB_U32_:%[0-9]+]]:sreg_32 = S_SUB_U32 [[COPY]], [[COPY1]], implicit-def $scc ; GFX10: [[V_SUB_U32_e64_:%[0-9]+]]:vgpr_32 = V_SUB_U32_e64 [[COPY2]], [[S_SUB_U32_]], 0, implicit $exec ; GFX10: [[V_SUB_U32_e64_1:%[0-9]+]]:vgpr_32 = V_SUB_U32_e64 [[S_SUB_U32_]], [[V_SUB_U32_e64_]], 0, implicit $exec ; GFX10: [[V_SUB_U32_e64_2:%[0-9]+]]:vgpr_32 = V_SUB_U32_e64 [[V_SUB_U32_e64_1]], [[COPY2]], 0, implicit $exec diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-trunc.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-trunc.mir index d5e7bc43..82d9549 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-trunc.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-trunc.mir @@ -11,7 +11,7 @@ body: | bb.0: liveins: $sgpr0 ; GCN-LABEL: name: trunc_sgpr_s32_to_s1 - ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: S_ENDPGM 0, implicit [[COPY]] %0:sgpr(s32) =COPY $sgpr0 %1:sgpr(s1) = G_TRUNC %0 @@ -27,7 +27,7 @@ body: | bb.0: liveins: $sgpr0 ; GCN-LABEL: name: trunc_sgpr_s32_to_s16 - ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GCN: S_ENDPGM 0, implicit [[COPY]] %0:sgpr(s32) =COPY $sgpr0 %1:sgpr(s16) = G_TRUNC %0 @@ -44,7 +44,7 @@ body: | liveins: $sgpr0_sgpr1 ; GCN-LABEL: name: trunc_sgpr_s64_to_s32 ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1 - ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY]].sub0 + ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0 ; GCN: S_ENDPGM 0, implicit [[COPY1]] %0:sgpr(s64) = COPY $sgpr0_sgpr1 %1:sgpr(s32) = G_TRUNC %0 @@ -61,7 +61,7 @@ body: | liveins: $sgpr0_sgpr1 ; GCN-LABEL: name: trunc_sgpr_s64_to_s16 ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1 - ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY]].sub0 + ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0 ; GCN: S_ENDPGM 0, implicit [[COPY1]] %0:sgpr(s64) = COPY $sgpr0_sgpr1 %1:sgpr(s16) = G_TRUNC %0 @@ -78,7 +78,7 @@ body: | liveins: $sgpr0_sgpr1 ; GCN-LABEL: name: trunc_sgpr_s64_to_s1 ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1 - ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY]].sub0 + ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0 ; GCN: S_ENDPGM 0, implicit [[COPY1]] %0:sgpr(s64) = COPY $sgpr0_sgpr1 %1:sgpr(s1) = G_TRUNC %0 @@ -95,7 +95,7 @@ body: | liveins: $sgpr0_sgpr1_sgpr2 ; GCN-LABEL: name: trunc_sgpr_s96_to_s16 ; GCN: [[COPY:%[0-9]+]]:sreg_96 = COPY $sgpr0_sgpr1_sgpr2 - ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY]].sub0 + ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0 ; GCN: S_ENDPGM 0, implicit [[COPY1]] %0:sgpr(s96) = COPY $sgpr0_sgpr1_sgpr2 %1:sgpr(s16) = G_TRUNC %0 @@ -129,7 +129,7 @@ body: | liveins: $sgpr0_sgpr1_sgpr2_sgpr3 ; GCN-LABEL: name: trunc_sgpr_s128_to_s16 ; GCN: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3 - ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY]].sub0 + ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0 ; GCN: S_ENDPGM 0, implicit [[COPY1]] %0:sgpr(s128) = COPY $sgpr0_sgpr1_sgpr2_sgpr3 %1:sgpr(s16) = G_TRUNC %0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-uaddo.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-uaddo.mir index 6cd1a1a..a53ec2d 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-uaddo.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-uaddo.mir @@ -17,7 +17,7 @@ body: | ; GFX6: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX6: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 ; GFX6: 
[[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY]], [[COPY1]], implicit-def $scc - ; GFX6: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $scc + ; GFX6: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $scc ; GFX6: $scc = COPY [[COPY2]] ; GFX6: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 [[COPY]], [[COPY1]], implicit $scc ; GFX6: S_ENDPGM 0, implicit [[S_ADD_U32_]], implicit [[S_CSELECT_B32_]] @@ -25,7 +25,7 @@ body: | ; GFX8: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX8: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 ; GFX8: [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY]], [[COPY1]], implicit-def $scc - ; GFX8: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $scc + ; GFX8: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $scc ; GFX8: $scc = COPY [[COPY2]] ; GFX8: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 [[COPY]], [[COPY1]], implicit $scc ; GFX8: S_ENDPGM 0, implicit [[S_ADD_U32_]], implicit [[S_CSELECT_B32_]] @@ -33,7 +33,7 @@ body: | ; GFX9: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX9: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 ; GFX9: [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY]], [[COPY1]], implicit-def $scc - ; GFX9: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $scc + ; GFX9: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $scc ; GFX9: $scc = COPY [[COPY2]] ; GFX9: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 [[COPY]], [[COPY1]], implicit $scc ; GFX9: S_ENDPGM 0, implicit [[S_ADD_U32_]], implicit [[S_CSELECT_B32_]] @@ -42,7 +42,7 @@ body: | ; GFX10: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX10: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 ; GFX10: [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY]], [[COPY1]], implicit-def $scc - ; GFX10: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $scc + ; GFX10: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $scc ; GFX10: $scc = COPY [[COPY2]] ; GFX10: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 [[COPY]], [[COPY1]], implicit $scc ; GFX10: S_ENDPGM 0, implicit [[S_ADD_U32_]], implicit [[S_CSELECT_B32_]] @@ -104,7 +104,7 @@ body: | liveins: $sgpr0, $vgpr0 ; GFX6-LABEL: name: uaddo_s32_s1_vsv - ; GFX6: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX6: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GFX6: [[V_ADD_I32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_I32_e64_1:%[0-9]+]]:sreg_64_xexec = V_ADD_I32_e64 [[COPY]], [[COPY1]], 0, implicit $exec ; GFX6: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec @@ -112,7 +112,7 @@ body: | ; GFX6: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, [[V_MOV_B32_e32_1]], 0, [[V_MOV_B32_e32_]], [[V_ADD_I32_e64_1]], implicit $exec ; GFX6: S_ENDPGM 0, implicit [[V_ADD_I32_e64_]], implicit [[V_CNDMASK_B32_e64_]] ; GFX8-LABEL: name: uaddo_s32_s1_vsv - ; GFX8: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX8: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GFX8: [[V_ADD_I32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_I32_e64_1:%[0-9]+]]:sreg_64_xexec = V_ADD_I32_e64 [[COPY]], [[COPY1]], 0, implicit $exec ; GFX8: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec @@ -120,7 +120,7 @@ body: | ; GFX8: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, [[V_MOV_B32_e32_1]], 0, [[V_MOV_B32_e32_]], [[V_ADD_I32_e64_1]], implicit $exec ; GFX8: S_ENDPGM 0, implicit [[V_ADD_I32_e64_]], implicit [[V_CNDMASK_B32_e64_]] ; GFX9-LABEL: name: uaddo_s32_s1_vsv - ; GFX9: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX9: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GFX9: [[V_ADD_I32_e64_:%[0-9]+]]:vgpr_32, 
[[V_ADD_I32_e64_1:%[0-9]+]]:sreg_64_xexec = V_ADD_I32_e64 [[COPY]], [[COPY1]], 0, implicit $exec ; GFX9: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec @@ -129,7 +129,7 @@ body: | ; GFX9: S_ENDPGM 0, implicit [[V_ADD_I32_e64_]], implicit [[V_CNDMASK_B32_e64_]] ; GFX10-LABEL: name: uaddo_s32_s1_vsv ; GFX10: $vcc_hi = IMPLICIT_DEF - ; GFX10: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX10: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GFX10: [[V_ADD_I32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_I32_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_ADD_I32_e64 [[COPY]], [[COPY1]], 0, implicit $exec ; GFX10: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec @@ -156,7 +156,7 @@ body: | ; GFX6-LABEL: name: uaddo_s32_s1_vvs ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX6: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX6: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX6: [[V_ADD_I32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_I32_e64_1:%[0-9]+]]:sreg_64_xexec = V_ADD_I32_e64 [[COPY]], [[COPY1]], 0, implicit $exec ; GFX6: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec ; GFX6: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1, implicit $exec @@ -164,7 +164,7 @@ body: | ; GFX6: S_ENDPGM 0, implicit [[V_ADD_I32_e64_]], implicit [[V_CNDMASK_B32_e64_]] ; GFX8-LABEL: name: uaddo_s32_s1_vvs ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX8: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX8: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX8: [[V_ADD_I32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_I32_e64_1:%[0-9]+]]:sreg_64_xexec = V_ADD_I32_e64 [[COPY]], [[COPY1]], 0, implicit $exec ; GFX8: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec ; GFX8: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1, implicit $exec @@ -172,7 +172,7 @@ body: | ; GFX8: S_ENDPGM 0, implicit [[V_ADD_I32_e64_]], implicit [[V_CNDMASK_B32_e64_]] ; GFX9-LABEL: name: uaddo_s32_s1_vvs ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX9: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX9: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX9: [[V_ADD_I32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_I32_e64_1:%[0-9]+]]:sreg_64_xexec = V_ADD_I32_e64 [[COPY]], [[COPY1]], 0, implicit $exec ; GFX9: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec ; GFX9: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1, implicit $exec @@ -181,7 +181,7 @@ body: | ; GFX10-LABEL: name: uaddo_s32_s1_vvs ; GFX10: $vcc_hi = IMPLICIT_DEF ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX10: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; GFX10: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; GFX10: [[V_ADD_I32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_I32_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_ADD_I32_e64 [[COPY]], [[COPY1]], 0, implicit $exec ; GFX10: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec ; GFX10: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1, implicit $exec diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-uitofp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-uitofp.mir index ee2377a..7ae77a8 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-uitofp.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-uitofp.mir @@ -40,13 +40,13 @@ body: | ; WAVE64-LABEL: name: uitofp_s32_to_s32_vs ; WAVE64: liveins: $sgpr0 - ; WAVE64: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; WAVE64: [[V_CVT_F32_U32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e64 
[[COPY]], 0, 0, implicit $exec ; WAVE64: $vgpr0 = COPY [[V_CVT_F32_U32_e64_]] ; WAVE32-LABEL: name: uitofp_s32_to_s32_vs ; WAVE32: liveins: $sgpr0 ; WAVE32: $vcc_hi = IMPLICIT_DEF - ; WAVE32: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; WAVE32: [[V_CVT_F32_U32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e64 [[COPY]], 0, 0, implicit $exec ; WAVE32: $vgpr0 = COPY [[V_CVT_F32_U32_e64_]] %0:sgpr(s32) = COPY $sgpr0 @@ -95,14 +95,14 @@ body: | ; WAVE64-LABEL: name: uitofp_s32_to_s16_vs ; WAVE64: liveins: $sgpr0 - ; WAVE64: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; WAVE64: [[V_CVT_F32_U32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e32 [[COPY]], implicit $exec ; WAVE64: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $exec ; WAVE64: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]] ; WAVE32-LABEL: name: uitofp_s32_to_s16_vs ; WAVE32: liveins: $sgpr0 ; WAVE32: $vcc_hi = IMPLICIT_DEF - ; WAVE32: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0 + ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; WAVE32: [[V_CVT_F32_U32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e32 [[COPY]], implicit $exec ; WAVE32: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $exec ; WAVE32: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]] @@ -129,7 +129,7 @@ body: | ; WAVE64: S_CMP_EQ_U32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32 = COPY $scc ; WAVE64: $scc = COPY [[COPY1]] - ; WAVE64: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32_xm0 = S_CSELECT_B32 0, 1065353216, implicit $scc + ; WAVE64: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 0, 1065353216, implicit $scc ; WAVE64: $sgpr0 = COPY [[S_CSELECT_B32_]] ; WAVE32-LABEL: name: uitofp_s1_to_s32_s_scc ; WAVE32: liveins: $sgpr0 @@ -139,7 +139,7 @@ body: | ; WAVE32: S_CMP_EQ_U32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32 = COPY $scc ; WAVE32: $scc = COPY [[COPY1]] - ; WAVE32: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32_xm0 = S_CSELECT_B32 0, 1065353216, implicit $scc + ; WAVE32: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 0, 1065353216, implicit $scc ; WAVE32: $sgpr0 = COPY [[S_CSELECT_B32_]] %0:sgpr(s32) = COPY $sgpr0 %1:sgpr(s32) = G_CONSTANT i32 0 @@ -165,7 +165,7 @@ body: | ; WAVE64: S_CMP_EQ_U32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32 = COPY $scc ; WAVE64: $scc = COPY [[COPY1]] - ; WAVE64: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32_xm0 = S_CSELECT_B32 0, 15360, implicit $scc + ; WAVE64: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 0, 15360, implicit $scc ; WAVE64: $sgpr0 = COPY [[S_CSELECT_B32_]] ; WAVE32-LABEL: name: uitofp_s1_to_s16_to_s_scc ; WAVE32: liveins: $sgpr0 @@ -175,7 +175,7 @@ body: | ; WAVE32: S_CMP_EQ_U32 [[COPY]], [[S_MOV_B32_]], implicit-def $scc ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32 = COPY $scc ; WAVE32: $scc = COPY [[COPY1]] - ; WAVE32: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32_xm0 = S_CSELECT_B32 0, 15360, implicit $scc + ; WAVE32: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 0, 15360, implicit $scc ; WAVE32: $sgpr0 = COPY [[S_CSELECT_B32_]] %0:sgpr(s32) = COPY $sgpr0 %1:sgpr(s32) = G_CONSTANT i32 0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-umax.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-umax.mir index a5ae6ab..ae66e08 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-umax.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-umax.mir @@ -29,7 +29,7 @@ body: | bb.0: 
    liveins: $sgpr0, $vgpr0
     ; GCN-LABEL: name: umax_s32_sv
-    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[V_MAX_U32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_U32_e64 [[COPY]], [[COPY1]], implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MAX_U32_e64_]]
@@ -49,7 +49,7 @@ body: |
     liveins: $sgpr0, $vgpr0
     ; GCN-LABEL: name: umax_s32_vs
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[V_MAX_U32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_U32_e64 [[COPY]], [[COPY1]], implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MAX_U32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-umin.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-umin.mir
index be097fc..5b09a86 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-umin.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-umin.mir
@@ -29,7 +29,7 @@ body: |
   bb.0:
     liveins: $sgpr0, $vgpr0
     ; GCN-LABEL: name: umin_s32_sv
-    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[V_MIN_U32_e64_:%[0-9]+]]:vgpr_32 = V_MIN_U32_e64 [[COPY]], [[COPY1]], implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MIN_U32_e64_]]
@@ -49,7 +49,7 @@ body: |
     liveins: $sgpr0, $vgpr0
     ; GCN-LABEL: name: umin_s32_vs
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[V_MIN_U32_e64_:%[0-9]+]]:vgpr_32 = V_MIN_U32_e64 [[COPY]], [[COPY1]], implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MIN_U32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-umulh.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-umulh.mir
index a4726fc..3210ce6 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-umulh.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-umulh.mir
@@ -36,7 +36,7 @@ body: |
   bb.0:
     liveins: $sgpr0, $vgpr0
     ; GCN-LABEL: name: umulh_s32_sv
-    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[V_MUL_HI_U32_:%[0-9]+]]:vgpr_32 = V_MUL_HI_U32 [[COPY]], [[COPY1]], implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MUL_HI_U32_]]
@@ -56,7 +56,7 @@ body: |
     liveins: $sgpr0, $vgpr0
     ; GCN-LABEL: name: umulh_s32_vs
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[V_MUL_HI_U32_:%[0-9]+]]:vgpr_32 = V_MUL_HI_U32 [[COPY]], [[COPY1]], implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MUL_HI_U32_]]
     %0:vgpr(s32) = COPY $vgpr0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-unmerge-values.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-unmerge-values.mir
index ad6b23c..0eef7c3 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-unmerge-values.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-unmerge-values.mir
@@ -40,8 +40,8 @@ body: |
     ; GCN-LABEL: name: test_unmerge_values_s_s32_s_s32_s_s64
     ; GCN: liveins: $sgpr0_sgpr1
     ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY]].sub0
-    ; GCN: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY]].sub1
+    ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
+    ; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub1
     ; GCN: S_ENDPGM 0, implicit [[COPY1]], implicit [[COPY2]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:sgpr(s32), %2:sgpr(s32) = G_UNMERGE_VALUES %0
@@ -62,7 +62,7 @@ body: |
     ; GCN: liveins: $sgpr0_sgpr1
     ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
-    ; GCN: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY]].sub1
+    ; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub1
     ; GCN: S_ENDPGM 0, implicit [[COPY1]], implicit [[COPY2]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:vgpr(s32), %2:sgpr(s32) = G_UNMERGE_VALUES %0
@@ -82,7 +82,7 @@ body: |
     ; GCN-LABEL: name: test_unmerge_values_s_s32_v_s32_s_s64
     ; GCN: liveins: $sgpr0_sgpr1
     ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY]].sub0
+    ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
     ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub1
     ; GCN: S_ENDPGM 0, implicit [[COPY1]], implicit [[COPY2]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
@@ -100,7 +100,7 @@ body: |
  bb.0:

     ; GCN-LABEL: name: test_unmerge_values_s_s32_v_s32_s_s64_undef_src
-    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY undef %2.sub0:sreg_64_xexec
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY undef %2.sub0:sreg_64_xexec
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY undef %2.sub1:sreg_64_xexec
     ; GCN: S_ENDPGM 0, implicit [[COPY]], implicit [[COPY1]]
     %1:sgpr(s32), %2:vgpr(s32) = G_UNMERGE_VALUES undef %0:sgpr(s64)
@@ -120,9 +120,9 @@ body: |
     ; GCN-LABEL: name: test_unmerge_values_s_s32_s_s32_s32_s_s96
     ; GCN: liveins: $sgpr0_sgpr1_sgpr2
     ; GCN: [[COPY:%[0-9]+]]:sreg_96 = COPY $sgpr0_sgpr1_sgpr2
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY]].sub0
-    ; GCN: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY]].sub1
-    ; GCN: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY]].sub2
+    ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
+    ; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub1
+    ; GCN: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub2
     ; GCN: S_ENDPGM 0, implicit [[COPY1]], implicit [[COPY2]], implicit [[COPY3]]
     %0:sgpr(s96) = COPY $sgpr0_sgpr1_sgpr2
     %1:sgpr(s32), %2:sgpr(s32), %3:sgpr(s32) = G_UNMERGE_VALUES %0
@@ -142,10 +142,10 @@ body: |
     ; GCN-LABEL: name: test_unmerge_values_s_s32_s_s32_s32_s_s32_s_s128
     ; GCN: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
     ; GCN: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-    ; GCN: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY]].sub0
-    ; GCN: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY]].sub1
-    ; GCN: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY]].sub2
-    ; GCN: [[COPY4:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY]].sub3
+    ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
+    ; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub1
+    ; GCN: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub2
+    ; GCN: [[COPY4:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub3
     ; GCN: S_ENDPGM 0, implicit [[COPY1]], implicit [[COPY2]], implicit [[COPY3]], implicit [[COPY4]]
     %0:sgpr(s128) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
     %1:sgpr(s32), %2:sgpr(s32), %3:sgpr(s32), %4:sgpr(s32) = G_UNMERGE_VALUES %0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-usubo.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-usubo.mir
index 171a2d5..a2ceaad 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-usubo.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-usubo.mir
@@ -17,7 +17,7 @@ body: |
     ; GFX6: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GFX6: [[S_SUB_U32_:%[0-9]+]]:sreg_32 = S_SUB_U32 [[COPY]], [[COPY1]], implicit-def $scc
-    ; GFX6: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+    ; GFX6: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $scc
     ; GFX6: $scc = COPY [[COPY2]]
     ; GFX6: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 [[COPY]], [[COPY1]], implicit $scc
     ; GFX6: S_ENDPGM 0, implicit [[S_SUB_U32_]], implicit [[S_CSELECT_B32_]]
@@ -25,7 +25,7 @@ body: |
     ; GFX8: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GFX8: [[S_SUB_U32_:%[0-9]+]]:sreg_32 = S_SUB_U32 [[COPY]], [[COPY1]], implicit-def $scc
-    ; GFX8: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+    ; GFX8: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $scc
     ; GFX8: $scc = COPY [[COPY2]]
     ; GFX8: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 [[COPY]], [[COPY1]], implicit $scc
     ; GFX8: S_ENDPGM 0, implicit [[S_SUB_U32_]], implicit [[S_CSELECT_B32_]]
@@ -33,7 +33,7 @@ body: |
     ; GFX9: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GFX9: [[S_SUB_U32_:%[0-9]+]]:sreg_32 = S_SUB_U32 [[COPY]], [[COPY1]], implicit-def $scc
-    ; GFX9: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+    ; GFX9: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $scc
     ; GFX9: $scc = COPY [[COPY2]]
     ; GFX9: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 [[COPY]], [[COPY1]], implicit $scc
     ; GFX9: S_ENDPGM 0, implicit [[S_SUB_U32_]], implicit [[S_CSELECT_B32_]]
@@ -42,7 +42,7 @@ body: |
     ; GFX10: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GFX10: [[S_SUB_U32_:%[0-9]+]]:sreg_32 = S_SUB_U32 [[COPY]], [[COPY1]], implicit-def $scc
-    ; GFX10: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+    ; GFX10: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $scc
     ; GFX10: $scc = COPY [[COPY2]]
     ; GFX10: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 [[COPY]], [[COPY1]], implicit $scc
     ; GFX10: S_ENDPGM 0, implicit [[S_SUB_U32_]], implicit [[S_CSELECT_B32_]]
@@ -104,7 +104,7 @@ body: |
     liveins: $sgpr0, $vgpr0

     ; GFX6-LABEL: name: usubo_s32_s1_vsv
-    ; GFX6: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX6: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[V_SUB_I32_e64_:%[0-9]+]]:vgpr_32, [[V_SUB_I32_e64_1:%[0-9]+]]:sreg_64_xexec = V_SUB_I32_e64 [[COPY]], [[COPY1]], 0, implicit $exec
     ; GFX6: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
@@ -112,7 +112,7 @@ body: |
     ; GFX6: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, [[V_MOV_B32_e32_1]], 0, [[V_MOV_B32_e32_]], [[V_SUB_I32_e64_1]], implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_SUB_I32_e64_]], implicit [[V_CNDMASK_B32_e64_]]
     ; GFX8-LABEL: name: usubo_s32_s1_vsv
-    ; GFX8: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX8: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX8: [[V_SUB_I32_e64_:%[0-9]+]]:vgpr_32, [[V_SUB_I32_e64_1:%[0-9]+]]:sreg_64_xexec = V_SUB_I32_e64 [[COPY]], [[COPY1]], 0, implicit $exec
     ; GFX8: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
@@ -120,7 +120,7 @@ body: |
     ; GFX8: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, [[V_MOV_B32_e32_1]], 0, [[V_MOV_B32_e32_]], [[V_SUB_I32_e64_1]], implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_SUB_I32_e64_]], implicit [[V_CNDMASK_B32_e64_]]
     ; GFX9-LABEL: name: usubo_s32_s1_vsv
-    ; GFX9: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX9: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9: [[V_SUB_I32_e64_:%[0-9]+]]:vgpr_32, [[V_SUB_I32_e64_1:%[0-9]+]]:sreg_64_xexec = V_SUB_I32_e64 [[COPY]], [[COPY1]], 0, implicit $exec
    ; GFX9: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
@@ -129,7 +129,7 @@ body: |
     ; GFX9: S_ENDPGM 0, implicit [[V_SUB_I32_e64_]], implicit [[V_CNDMASK_B32_e64_]]
     ; GFX10-LABEL: name: usubo_s32_s1_vsv
     ; GFX10: $vcc_hi = IMPLICIT_DEF
-    ; GFX10: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX10: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX10: [[V_SUB_I32_e64_:%[0-9]+]]:vgpr_32, [[V_SUB_I32_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_SUB_I32_e64 [[COPY]], [[COPY1]], 0, implicit $exec
     ; GFX10: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
@@ -156,7 +156,7 @@ body: |

     ; GFX6-LABEL: name: usubo_s32_s1_vvs
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX6: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX6: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX6: [[V_SUB_I32_e64_:%[0-9]+]]:vgpr_32, [[V_SUB_I32_e64_1:%[0-9]+]]:sreg_64_xexec = V_SUB_I32_e64 [[COPY]], [[COPY1]], 0, implicit $exec
     ; GFX6: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
     ; GFX6: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
@@ -164,7 +164,7 @@ body: |
     ; GFX6: S_ENDPGM 0, implicit [[V_SUB_I32_e64_]], implicit [[V_CNDMASK_B32_e64_]]
     ; GFX8-LABEL: name: usubo_s32_s1_vvs
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX8: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX8: [[V_SUB_I32_e64_:%[0-9]+]]:vgpr_32, [[V_SUB_I32_e64_1:%[0-9]+]]:sreg_64_xexec = V_SUB_I32_e64 [[COPY]], [[COPY1]], 0, implicit $exec
     ; GFX8: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
     ; GFX8: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
@@ -172,7 +172,7 @@ body: |
     ; GFX8: S_ENDPGM 0, implicit [[V_SUB_I32_e64_]], implicit [[V_CNDMASK_B32_e64_]]
     ; GFX9-LABEL: name: usubo_s32_s1_vvs
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX9: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX9: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX9: [[V_SUB_I32_e64_:%[0-9]+]]:vgpr_32, [[V_SUB_I32_e64_1:%[0-9]+]]:sreg_64_xexec = V_SUB_I32_e64 [[COPY]], [[COPY1]], 0, implicit $exec
     ; GFX9: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
     ; GFX9: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
@@ -181,7 +181,7 @@ body: |
     ; GFX10-LABEL: name: usubo_s32_s1_vvs
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX10: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GFX10: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX10: [[V_SUB_I32_e64_:%[0-9]+]]:vgpr_32, [[V_SUB_I32_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_SUB_I32_e64 [[COPY]], [[COPY1]], 0, implicit $exec
     ; GFX10: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
     ; GFX10: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-xor.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-xor.mir
index f915b3b..8bf8f3d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-xor.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-xor.mir
@@ -28,9 +28,9 @@ body: |
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; WAVE32: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
-    ; WAVE32: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_EQ_U32_e64 [[COPY]], [[V_MOV_B32_e32_]], implicit $exec
-    ; WAVE32: [[V_CMP_EQ_U32_e64_1:%[0-9]+]]:sreg_32_xm0 = V_CMP_EQ_U32_e64 [[COPY1]], [[V_MOV_B32_e32_]], implicit $exec
-    ; WAVE32: [[S_XOR_B32_:%[0-9]+]]:sreg_32_xm0 = S_XOR_B32 [[V_CMP_EQ_U32_e64_]], [[V_CMP_EQ_U32_e64_1]]
+    ; WAVE32: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32 = V_CMP_EQ_U32_e64 [[COPY]], [[V_MOV_B32_e32_]], implicit $exec
+    ; WAVE32: [[V_CMP_EQ_U32_e64_1:%[0-9]+]]:sreg_32 = V_CMP_EQ_U32_e64 [[COPY1]], [[V_MOV_B32_e32_]], implicit $exec
+    ; WAVE32: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[V_CMP_EQ_U32_e64_]], [[V_CMP_EQ_U32_e64_1]]
     ; WAVE32: S_ENDPGM 0, implicit [[S_XOR_B32_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -53,15 +53,15 @@ body: |
     liveins: $sgpr0, $sgpr1
     ; WAVE64-LABEL: name: xor_s1_sgpr_sgpr_sgpr
     ; WAVE64: liveins: $sgpr0, $sgpr1
-    ; WAVE64: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr1
+    ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+    ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; WAVE64: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]]
     ; WAVE64: S_ENDPGM 0, implicit [[S_XOR_B32_]]
     ; WAVE32-LABEL: name: xor_s1_sgpr_sgpr_sgpr
     ; WAVE32: liveins: $sgpr0, $sgpr1
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
-    ; WAVE32: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr1
+    ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+    ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; WAVE32: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]]
     ; WAVE32: S_ENDPGM 0, implicit [[S_XOR_B32_]]
     %0:sgpr(s32) = COPY $sgpr0
@@ -118,15 +118,15 @@ body: |
     liveins: $sgpr0, $sgpr1
     ; WAVE64-LABEL: name: xor_s16_sgpr_sgpr_sgpr
     ; WAVE64: liveins: $sgpr0, $sgpr1
-    ; WAVE64: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr1
+    ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+    ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; WAVE64: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]]
     ; WAVE64: S_ENDPGM 0, implicit [[S_XOR_B32_]]
     ; WAVE32-LABEL: name: xor_s16_sgpr_sgpr_sgpr
     ; WAVE32: liveins: $sgpr0, $sgpr1
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
-    ; WAVE32: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr1
+    ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+    ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; WAVE32: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]]
     ; WAVE32: S_ENDPGM 0, implicit [[S_XOR_B32_]]
     %0:sgpr(s32) = COPY $sgpr0
@@ -415,7 +415,7 @@ body: |
     ; WAVE64: S_ENDPGM 0, implicit [[S_XOR_B64_]]
     ; WAVE32-LABEL: name: xor_s1_vcc_undef_vcc_undef_vcc
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
-    ; WAVE32: [[S_XOR_B32_:%[0-9]+]]:sreg_32_xm0 = S_XOR_B32 undef %1:sreg_32_xm0, undef %2:sreg_32_xm0
+    ; WAVE32: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 undef %1:sreg_32, undef %2:sreg_32
     ; WAVE32: S_ENDPGM 0, implicit [[S_XOR_B32_]]
     %2:vcc(s1) = G_XOR undef %0:vcc(s1), undef %1:vcc(s1)
     S_ENDPGM 0, implicit %2
@@ -483,9 +483,9 @@ body: |
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec
-    ; WAVE32: [[V_CMP_NE_U32_e64_1:%[0-9]+]]:sreg_32_xm0 = V_CMP_NE_U32_e64 0, [[COPY1]], implicit $exec
-    ; WAVE32: [[S_XOR_B32_:%[0-9]+]]:sreg_32_xm0 = S_XOR_B32 [[V_CMP_NE_U32_e64_]], [[V_CMP_NE_U32_e64_1]]
+    ; WAVE32: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec
+    ; WAVE32: [[V_CMP_NE_U32_e64_1:%[0-9]+]]:sreg_32 = V_CMP_NE_U32_e64 0, [[COPY1]], implicit $exec
+    ; WAVE32: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[V_CMP_NE_U32_e64_]], [[V_CMP_NE_U32_e64_1]]
     ; WAVE32: S_ENDPGM 0, implicit [[S_XOR_B32_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -513,22 +513,22 @@ body: |
     ; WAVE64-LABEL: name: copy_select_constrain_vcc_result_reg_wave32
     ; WAVE64: liveins: $vgpr0
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; WAVE64: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1
+    ; WAVE64: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1
     ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]]
-    ; WAVE64: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B32_]]
-    ; WAVE64: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY1]], [[COPY2]]
-    ; WAVE64: [[COPY3:%[0-9]+]]:sreg_64_xexec = COPY [[S_XOR_B32_]]
-    ; WAVE64: [[COPY4:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY3]]
-    ; WAVE64: S_ENDPGM 0, implicit [[COPY4]]
+    ; WAVE64: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY1]], [[S_MOV_B32_]]
+    ; WAVE64: [[COPY2:%[0-9]+]]:sreg_64_xexec = COPY [[S_XOR_B32_]]
+    ; WAVE64: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY [[COPY2]]
+    ; WAVE64: S_ENDPGM 0, implicit [[COPY3]]
     ; WAVE32-LABEL: name: copy_select_constrain_vcc_result_reg_wave32
     ; WAVE32: liveins: $vgpr0
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; WAVE32: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1
-    ; WAVE32: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_32_xm0 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec
-    ; WAVE32: [[V_CMP_NE_U32_e64_1:%[0-9]+]]:sreg_32_xm0 = V_CMP_NE_U32_e64 0, [[S_MOV_B32_]], implicit $exec
-    ; WAVE32: [[S_XOR_B32_:%[0-9]+]]:sreg_32_xm0 = S_XOR_B32 [[V_CMP_NE_U32_e64_]], [[V_CMP_NE_U32_e64_1]]
-    ; WAVE32: S_ENDPGM 0, implicit [[S_XOR_B32_]]
+    ; WAVE32: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1
+    ; WAVE32: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec
+    ; WAVE32: [[V_CMP_NE_U32_e64_1:%[0-9]+]]:sreg_32 = V_CMP_NE_U32_e64 0, [[S_MOV_B32_]], implicit $exec
+    ; WAVE32: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[V_CMP_NE_U32_e64_]], [[V_CMP_NE_U32_e64_1]]
+    ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY [[S_XOR_B32_]]
+    ; WAVE32: S_ENDPGM 0, implicit [[COPY1]]
     %1:vgpr(s32) = COPY $vgpr0
     %0:vgpr(s1) = G_TRUNC %1(s32)
     %2:sgpr(s1) = G_CONSTANT i1 true
@@ -555,7 +555,7 @@ body: |
     ; WAVE64-LABEL: name: copy_select_constrain_vcc_result_reg_wave64
     ; WAVE64: liveins: $vgpr0
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; WAVE64: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1
+    ; WAVE64: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1
     ; WAVE64: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec
     ; WAVE64: [[V_CMP_NE_U32_e64_1:%[0-9]+]]:sreg_64 = V_CMP_NE_U32_e64 0, [[S_MOV_B32_]], implicit $exec
     ; WAVE64: [[S_XOR_B64_:%[0-9]+]]:sreg_64 = S_XOR_B64 [[V_CMP_NE_U32_e64_]], [[V_CMP_NE_U32_e64_1]]
@@ -565,12 +565,12 @@ body: |
     ; WAVE32: liveins: $vgpr0
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; WAVE32: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1
-    ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]]
-    ; WAVE32: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B32_]]
-    ; WAVE32: [[S_XOR_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_XOR_B32 [[COPY1]], [[COPY2]]
-    ; WAVE32: [[COPY3:%[0-9]+]]:sreg_64_xexec = COPY [[S_XOR_B32_]]
-    ; WAVE32: S_ENDPGM 0, implicit [[COPY3]]
+    ; WAVE32: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1
+    ; WAVE32: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NE_U32_e64 0, [[COPY]], implicit $exec
+    ; WAVE32: [[V_CMP_NE_U32_e64_1:%[0-9]+]]:sreg_32 = V_CMP_NE_U32_e64 0, [[S_MOV_B32_]], implicit $exec
+    ; WAVE32: [[S_XOR_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_XOR_B32 [[V_CMP_NE_U32_e64_]], [[V_CMP_NE_U32_e64_1]]
+    ; WAVE32: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY [[S_XOR_B32_]]
+    ; WAVE32: S_ENDPGM 0, implicit [[COPY1]]
     %1:vgpr(s32) = COPY $vgpr0
     %0:vgpr(s1) = G_TRUNC %1(s32)
     %2:sgpr(s1) = G_CONSTANT i1 true
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-zext.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-zext.mir
index 9caf823..0871983 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-zext.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-zext.mir
@@ -15,7 +15,7 @@ body: |
     ; GCN: S_CMP_EQ_U32 [[COPY]], [[COPY]], implicit-def $scc
     ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $scc
     ; GCN: $scc = COPY [[COPY1]]
-    ; GCN: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32_xm0 = S_CSELECT_B32 0, 1, implicit $scc
+    ; GCN: [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 0, 1, implicit $scc
     ; GCN: $sgpr0 = COPY [[S_CSELECT_B32_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:scc(s1) = G_ICMP intpred(eq), %0, %0
@@ -55,8 +55,8 @@ body: |
     liveins: $sgpr0

     ; GCN-LABEL: name: zext_sgpr_s1_to_sgpr_s32
-    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GCN: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0 = S_AND_B32 [[COPY]], 1, implicit-def $scc
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+    ; GCN: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], 1, implicit-def $scc
     ; GCN: $sgpr0 = COPY [[S_AND_B32_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s1) = G_TRUNC %0
@@ -74,7 +74,7 @@ body: |
     liveins: $sgpr0

     ; GCN-LABEL: name: zext_sgpr_s1_to_sgpr_s64
-    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
     ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[DEF]], %subreg.sub1
     ; GCN: [[S_BFE_U64_:%[0-9]+]]:sreg_64_xexec = S_BFE_U64 [[REG_SEQUENCE]], 65536, implicit-def $scc
@@ -95,8 +95,8 @@ body: |
     liveins: $sgpr0

     ; GCN-LABEL: name: zext_sgpr_s8_to_sgpr_s32
-    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GCN: [[S_BFE_U32_:%[0-9]+]]:sreg_32_xm0 = S_BFE_U32 [[COPY]], 524288, implicit-def $scc
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+    ; GCN: [[S_BFE_U32_:%[0-9]+]]:sreg_32 = S_BFE_U32 [[COPY]], 524288, implicit-def $scc
     ; GCN: $sgpr0 = COPY [[S_BFE_U32_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s8) = G_TRUNC %0
@@ -115,8 +115,8 @@ body: |
     liveins: $sgpr0

     ; GCN-LABEL: name: zext_sgpr_s16_to_sgpr_s32
-    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GCN: [[S_BFE_U32_:%[0-9]+]]:sreg_32_xm0 = S_BFE_U32 [[COPY]], 1048576, implicit-def $scc
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+    ; GCN: [[S_BFE_U32_:%[0-9]+]]:sreg_32 = S_BFE_U32 [[COPY]], 1048576, implicit-def $scc
     ; GCN: $sgpr0 = COPY [[S_BFE_U32_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
@@ -135,7 +135,7 @@ body: |
     liveins: $sgpr0

     ; GCN-LABEL: name: zext_sgpr_s16_to_sgpr_s64
-    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
     ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[DEF]], %subreg.sub1
     ; GCN: [[S_BFE_U64_:%[0-9]+]]:sreg_64_xexec = S_BFE_U64 [[REG_SEQUENCE]], 1048576, implicit-def $scc
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.format.f16.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.format.f16.ll
index ea012b3..c961514 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.format.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.format.f16.ll
@@ -6,10 +6,10 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; UNPACKED-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f16
   ; UNPACKED: bb.1 (%ir-block.0):
   ; UNPACKED: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; UNPACKED: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; UNPACKED: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; UNPACKED: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; UNPACKED: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; UNPACKED: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; UNPACKED: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; UNPACKED: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; UNPACKED: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; UNPACKED: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; UNPACKED: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; UNPACKED: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
@@ -19,10 +19,10 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; PACKED-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f16
   ; PACKED: bb.1 (%ir-block.0):
   ; PACKED: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; PACKED: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; PACKED: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; PACKED: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; PACKED: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; PACKED: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; PACKED: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; PACKED: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; PACKED: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; PACKED: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; PACKED: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; PACKED: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
@@ -37,10 +37,10 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__voffset_409
   ; UNPACKED-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__voffset_4095__sgpr_soffset_f16
   ; UNPACKED: bb.1 (%ir-block.0):
   ; UNPACKED: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
-  ; UNPACKED: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; UNPACKED: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; UNPACKED: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; UNPACKED: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; UNPACKED: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; UNPACKED: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; UNPACKED: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; UNPACKED: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; UNPACKED: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; UNPACKED: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; UNPACKED: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
@@ -49,10 +49,10 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__voffset_409
   ; PACKED-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__voffset_4095__sgpr_soffset_f16
   ; PACKED: bb.1 (%ir-block.0):
   ; PACKED: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
-  ; PACKED: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; PACKED: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; PACKED: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; PACKED: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; PACKED: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; PACKED: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; PACKED: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; PACKED: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; PACKED: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; PACKED: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; PACKED: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
@@ -66,15 +66,15 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; UNPACKED-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16
   ; UNPACKED: bb.1 (%ir-block.0):
   ; UNPACKED: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; UNPACKED: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; UNPACKED: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; UNPACKED: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; UNPACKED: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; UNPACKED: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; UNPACKED: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; UNPACKED: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; UNPACKED: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; UNPACKED: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; UNPACKED: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; UNPACKED: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; UNPACKED: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
-  ; UNPACKED: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 16
+  ; UNPACKED: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16
   ; UNPACKED: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
   ; UNPACKED: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 [[COPY7]], [[COPY4]], implicit $exec
   ; UNPACKED: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[V_LSHRREV_B32_e64_]], %subreg.sub1
@@ -83,10 +83,10 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; PACKED-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16
   ; PACKED: bb.1 (%ir-block.0):
   ; PACKED: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; PACKED: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; PACKED: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; PACKED: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; PACKED: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; PACKED: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; PACKED: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; PACKED: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; PACKED: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; PACKED: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; PACKED: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; PACKED: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
@@ -101,16 +101,16 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; UNPACKED-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16
   ; UNPACKED: bb.1 (%ir-block.0):
   ; UNPACKED: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
-  ; UNPACKED: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; UNPACKED: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; UNPACKED: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; UNPACKED: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; UNPACKED: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; UNPACKED: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; UNPACKED: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; UNPACKED: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; UNPACKED: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; UNPACKED: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; UNPACKED: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
   ; UNPACKED: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; UNPACKED: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
-  ; UNPACKED: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 16
+  ; UNPACKED: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16
   ; UNPACKED: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
   ; UNPACKED: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 [[COPY8]], [[COPY4]], implicit $exec
   ; UNPACKED: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
@@ -121,10 +121,10 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; PACKED-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16
   ; PACKED: bb.1 (%ir-block.0):
   ; PACKED: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
-  ; PACKED: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; PACKED: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; PACKED: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; PACKED: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; PACKED: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; PACKED: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; PACKED: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; PACKED: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; PACKED: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; PACKED: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; PACKED: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
@@ -152,7 +152,7 @@ define amdgpu_ps void @raw_buffer_store_format__vgpr_rsrc__vgpr_val__vgpr_voffse
   ; UNPACKED: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6
   ; UNPACKED: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr2
   ; UNPACKED: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
-  ; UNPACKED: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 16
+  ; UNPACKED: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16
   ; UNPACKED: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
   ; UNPACKED: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 [[COPY8]], [[COPY4]], implicit $exec
   ; UNPACKED: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
@@ -228,15 +228,15 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; UNPACKED-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset4095
   ; UNPACKED: bb.1 (%ir-block.0):
   ; UNPACKED: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; UNPACKED: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; UNPACKED: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; UNPACKED: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; UNPACKED: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; UNPACKED: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; UNPACKED: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; UNPACKED: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; UNPACKED: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; UNPACKED: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; UNPACKED: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; UNPACKED: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
   ; UNPACKED: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
-  ; UNPACKED: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 16
+  ; UNPACKED: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 16
   ; UNPACKED: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_1]]
   ; UNPACKED: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 [[COPY6]], [[COPY4]], implicit $exec
   ; UNPACKED: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[V_LSHRREV_B32_e64_]], %subreg.sub1
@@ -245,10 +245,10 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; PACKED-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset4095
   ; PACKED: bb.1 (%ir-block.0):
   ; PACKED: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; PACKED: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; PACKED: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; PACKED: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; PACKED: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; PACKED: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; PACKED: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; PACKED: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; PACKED: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; PACKED: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; PACKED: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; PACKED: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
@@ -263,15 +263,15 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; UNPACKED-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset4096
   ; UNPACKED: bb.1 (%ir-block.0):
   ; UNPACKED: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; UNPACKED: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; UNPACKED: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; UNPACKED: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; UNPACKED: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; UNPACKED: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; UNPACKED: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; UNPACKED: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; UNPACKED: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; UNPACKED: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; UNPACKED: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; UNPACKED: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
   ; UNPACKED: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096
-  ; UNPACKED: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 16
+  ; UNPACKED: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 16
   ; UNPACKED: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_1]]
   ; UNPACKED: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 [[COPY6]], [[COPY4]], implicit $exec
   ; UNPACKED: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[V_LSHRREV_B32_e64_]], %subreg.sub1
@@ -280,10 +280,10 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; PACKED-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset4096
   ; PACKED: bb.1 (%ir-block.0):
   ; PACKED: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; PACKED: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; PACKED: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; PACKED: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; PACKED: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; PACKED: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; PACKED: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; PACKED: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; PACKED: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; PACKED: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; PACKED: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; PACKED: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
@@ -298,15 +298,15 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; UNPACKED-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_voffset_add_16
   ; UNPACKED: bb.1 (%ir-block.0):
   ; UNPACKED: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; UNPACKED: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; UNPACKED: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; UNPACKED: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; UNPACKED: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; UNPACKED: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; UNPACKED: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; UNPACKED: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; UNPACKED: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; UNPACKED: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; UNPACKED: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; UNPACKED: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; UNPACKED: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
-  ; UNPACKED: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 16
+  ; UNPACKED: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16
   ; UNPACKED: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
   ; UNPACKED: %11:vgpr_32, dead %21:sreg_64_xexec = V_ADD_I32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
   ; UNPACKED: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
@@ -317,15 +317,15 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; PACKED-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_voffset_add_16
   ; PACKED: bb.1 (%ir-block.0):
   ; PACKED: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; PACKED: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; PACKED: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; PACKED: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; PACKED: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; PACKED: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; PACKED: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; PACKED: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; PACKED: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; PACKED: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; PACKED: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; PACKED: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; PACKED: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
-  ; PACKED: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 16
+  ; PACKED: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16
   ; PACKED: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
   ; PACKED: %11:vgpr_32, dead %13:sreg_64_xexec = V_ADD_I32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
   ; PACKED: BUFFER_STORE_FORMAT_D16_XY_OFFEN_exact [[COPY4]], %11, [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom TargetCustom7, align 1, addrspace 4)
@@ -339,18 +339,18 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; UNPACKED-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_voffset_add_4095
   ; UNPACKED: bb.1 (%ir-block.0):
   ; UNPACKED: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; UNPACKED: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; UNPACKED: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; UNPACKED: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; UNPACKED: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; UNPACKED: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; UNPACKED: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; UNPACKED: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; UNPACKED: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; UNPACKED: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; UNPACKED: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; UNPACKED: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; UNPACKED: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
-  ; UNPACKED: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 4095
+  ; UNPACKED: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
   ; UNPACKED: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
   ; UNPACKED: %11:vgpr_32, dead %22:sreg_64_xexec = V_ADD_I32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
-  ; UNPACKED: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 16
+  ; UNPACKED: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 16
   ; UNPACKED: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_1]]
   ; UNPACKED: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 [[COPY8]], [[COPY4]], implicit $exec
   ; UNPACKED: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[V_LSHRREV_B32_e64_]], %subreg.sub1
@@ -359,15 +359,15 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; PACKED-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_voffset_add_4095
   ; PACKED: bb.1 (%ir-block.0):
   ; PACKED: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; PACKED: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; PACKED: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; PACKED: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; PACKED: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; PACKED: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; PACKED: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; PACKED: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; PACKED: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; PACKED: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; PACKED: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; PACKED: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; PACKED: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
-  ; PACKED: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 4095
+  ; PACKED: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
   ; PACKED: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
   ; PACKED: %11:vgpr_32, dead %13:sreg_64_xexec = V_ADD_I32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
   ; PACKED: BUFFER_STORE_FORMAT_D16_XY_OFFEN_exact [[COPY4]], %11, [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom TargetCustom7, align 1, addrspace 4)
@@ -381,18 +381,18 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; UNPACKED-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_voffset_add_4096
   ; UNPACKED: bb.1 (%ir-block.0):
   ; UNPACKED: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; UNPACKED: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; UNPACKED: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; UNPACKED: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; UNPACKED: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; UNPACKED: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; UNPACKED: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; UNPACKED: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; UNPACKED: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; UNPACKED: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; UNPACKED: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; UNPACKED: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; UNPACKED: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
-  ; UNPACKED: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 4096
+  ; UNPACKED: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096
   ; UNPACKED: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
   ; UNPACKED: %11:vgpr_32, dead %22:sreg_64_xexec = V_ADD_I32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
-  ; UNPACKED: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 16
+  ; UNPACKED: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 16
   ; UNPACKED: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_1]]
   ; UNPACKED: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 [[COPY8]], [[COPY4]], implicit $exec
   ; UNPACKED: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[V_LSHRREV_B32_e64_]], %subreg.sub1
@@ -401,15 +401,15 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; PACKED-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_voffset_add_4096
   ; PACKED: bb.1 (%ir-block.0):
   ; PACKED: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; PACKED: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; PACKED: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; PACKED: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; PACKED: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; PACKED: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; PACKED: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; PACKED: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; PACKED: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; PACKED: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; PACKED: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; PACKED: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; PACKED: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
-  ; PACKED: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 4096
+  ; PACKED: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096
   ; PACKED: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
   ; PACKED: %11:vgpr_32, dead %13:sreg_64_xexec = V_ADD_I32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
   ; PACKED: BUFFER_STORE_FORMAT_D16_XY_OFFEN_exact [[COPY4]], %11, [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom TargetCustom7, align 1, addrspace 4)
@@ -435,10 +435,10 @@ define amdgpu_ps void @raw_buffer_store_format__vgpr_rsrc__vgpr_val__vgpr_voffse
   ; UNPACKED: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6
   ; UNPACKED: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr2
   ; UNPACKED: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
-  ; UNPACKED: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 4096
+  ; UNPACKED: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096
   ; UNPACKED: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
   ; UNPACKED: %13:vgpr_32, dead %47:sreg_64_xexec = V_ADD_I32_e64 [[COPY6]], [[COPY8]], 0, implicit $exec
-  ; UNPACKED: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 16
+  ; UNPACKED: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 16
   ; UNPACKED: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_1]]
   ; UNPACKED: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 [[COPY9]], [[COPY4]], implicit $exec
   ; UNPACKED: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_1]]
@@ -482,7 +482,7 @@ define amdgpu_ps void @raw_buffer_store_format__vgpr_rsrc__vgpr_val__vgpr_voffse
   ; PACKED: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr2
   ; PACKED: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
   ; PACKED: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
-  ; PACKED: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 4096
+  ; PACKED: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096
   ; PACKED: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
   ; PACKED: %13:vgpr_32, dead %31:sreg_64_xexec = V_ADD_I32_e64 [[COPY6]], [[COPY8]], 0, implicit $exec
   ; PACKED: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.format.f32.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.format.f32.ll
index 1793bbd..9e98a3a0 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.format.f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.format.f32.ll
@@ -6,10 +6,10 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; CHECK-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
@@ -24,10 +24,10 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__voffset_409
   ; CHECK-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__voffset_4095__sgpr_soffset_f32
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
@@ -41,10 +41,10 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; CHECK-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f32
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
@@ -61,10 +61,10 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; CHECK-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v3f32
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
@@ -82,10 +82,10 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; CHECK-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f32
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
@@ -149,10 +149,10 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; CHECK-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f32_soffset4095
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
@@ -169,10 +169,10 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; CHECK-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f32_soffset4096
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
@@ -189,17 +189,17 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; CHECK-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f32_voffset_add_16
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
   ; CHECK: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
   ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
-  ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 16
+  ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16
   ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
   ; CHECK: %13:vgpr_32, dead %15:sreg_64_xexec = V_ADD_I32_e64 [[COPY6]], [[COPY8]], 0, implicit $exec
   ; CHECK: BUFFER_STORE_FORMAT_XY_OFFEN_exact [[REG_SEQUENCE1]], %13, [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 8 into custom TargetCustom7, align 1, addrspace 4)
@@ -213,17 +213,17 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; CHECK-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f32_voffset_add_4095
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
   ; CHECK: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
   ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
-  ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 4095
+  ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
   ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
   ; CHECK: %13:vgpr_32, dead %15:sreg_64_xexec = V_ADD_I32_e64 [[COPY6]], [[COPY8]], 0, implicit $exec
   ; CHECK: BUFFER_STORE_FORMAT_XY_OFFEN_exact [[REG_SEQUENCE1]], %13, [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 8 into custom TargetCustom7, align 1, addrspace 4)
@@ -237,17 +237,17 @@ define amdgpu_ps void @raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffse
   ; CHECK-LABEL: name: raw_buffer_store_format__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f32_voffset_add_4096
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
   ; CHECK: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
   ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
-  ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 4096
+  ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096
   ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
   ; CHECK: %13:vgpr_32, dead %15:sreg_64_xexec = V_ADD_I32_e64 [[COPY6]], [[COPY8]], 0, implicit $exec
   ; CHECK: BUFFER_STORE_FORMAT_XY_OFFEN_exact [[REG_SEQUENCE1]], %13, [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 8 into custom TargetCustom7, align 1, addrspace 4)
@@ -276,7 +276,7 @@ define amdgpu_ps void @raw_buffer_store_format__vgpr_rsrc__vgpr_val__vgpr_voffse
   ; CHECK: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr2
   ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
   ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
-  ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 4096
+  ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096
   ; CHECK: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
   ; CHECK: %15:vgpr_32, dead %33:sreg_64_xexec = V_ADD_I32_e64 [[COPY8]], [[COPY10]], 0, implicit $exec
   ; CHECK: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.ll
index 6e6a7d0..a01f6b9 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.ll
@@ -7,10 +7,10 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
@@ -26,12 +26,12 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__sgpr_val__sgpr_voffset__sgpr
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__sgpr_val__sgpr_voffset__sgpr_soffset
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
-  ; CHECK: [[COPY4:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr6
-  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr7
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+  ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr6
+  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr7
   ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr8
   ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
   ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
@@ -90,10 +90,10 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__vgpr
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: successors: %bb.2(0x80000000)
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
@@ -165,10 +165,10 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_glc
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
@@ -183,10 +183,10 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_slc
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
@@ -201,10 +201,10 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_glc_slc
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
@@ -219,10 +219,10 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_dlc
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
@@ -237,10 +237,10 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_slc_dlc
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
@@ -255,10 +255,10 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_glc_dlc
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
@@ -273,10 +273,10 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_glc_slc_dlc
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
@@ -291,10 +291,10 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f32
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
@@ -311,10 +311,10 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v3f32
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
@@ -332,10 +332,10 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f32
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
@@ -354,10 +354,10 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_i8
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
@@ -373,10 +373,10 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_i16
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
@@ -392,10 +392,10 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f16
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
@@ -410,10 +410,10 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
@@ -428,10 +428,10 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
@@ -491,10 +491,10 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__sgpr_soffset_f32_v
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__sgpr_soffset_f32_voffset4095
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
@@ -508,10 +508,10 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__sgpr_soffset_f32_v
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__sgpr_soffset_f32_voffset4096
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
@@ -526,15 +526,15 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_16
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
-  ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 16
+  ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16
   ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
   ; CHECK: %11:vgpr_32, dead %13:sreg_64_xexec = V_ADD_I32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
   ; CHECK: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], %11, [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom TargetCustom7, align 1, addrspace 4)
@@ -548,15 +548,15 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4095
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
-  ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 4095
+  ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
   ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
   ; CHECK: %11:vgpr_32, dead %13:sreg_64_xexec = V_ADD_I32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
   ; CHECK: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], %11, [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom TargetCustom7, align 1, addrspace 4)
@@ -570,15 +570,15 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4096
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
   ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
-  ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 4096
+  ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096
   ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
   ; CHECK: %11:vgpr_32, dead %13:sreg_64_xexec = V_ADD_I32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
   ; CHECK: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], %11, [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom TargetCustom7, align 1, addrspace 4)
@@ -592,10 +592,10 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset4095
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
@@ -610,10 +610,10 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset4096
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
   ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
@@ -628,15 +628,15 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
   ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_16
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
-  ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2
-  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3
-  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4
-  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5
+  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+  ;
CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 16 + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16 ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] ; CHECK: %11:vgpr_32, dead %13:sreg_64_xexec = V_ADD_I32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec ; CHECK: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], %11, [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom TargetCustom7, align 1, addrspace 4) @@ -650,15 +650,15 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4095 ; CHECK: bb.1 (%ir-block.0): ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2 - ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3 - ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4 - ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5 + ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 4095 + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095 ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] ; CHECK: %11:vgpr_32, dead %13:sreg_64_xexec = V_ADD_I32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec ; CHECK: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], %11, [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom TargetCustom7, align 1, addrspace 4) @@ -672,15 +672,15 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; CHECK-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4096 ; CHECK: bb.1 (%ir-block.0): ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; CHECK: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr2 - ; CHECK: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr3 - ; CHECK: [[COPY2:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr4 - ; CHECK: [[COPY3:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr5 + ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 4096 + ; CHECK: 
[[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096 ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] ; CHECK: %11:vgpr_32, dead %13:sreg_64_xexec = V_ADD_I32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec ; CHECK: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], %11, [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom TargetCustom7, align 1, addrspace 4) @@ -704,7 +704,7 @@ define amdgpu_ps void @raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2 ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 5000 + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 5000 ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] ; CHECK: %11:vgpr_32, dead %29:sreg_64_xexec = V_ADD_I32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec ; CHECK: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 diff --git a/llvm/test/CodeGen/AMDGPU/buffer-intrinsics-mmo-offsets.ll b/llvm/test/CodeGen/AMDGPU/buffer-intrinsics-mmo-offsets.ll index 551c3b0..f96a138 100644 --- a/llvm/test/CodeGen/AMDGPU/buffer-intrinsics-mmo-offsets.ll +++ b/llvm/test/CodeGen/AMDGPU/buffer-intrinsics-mmo-offsets.ll @@ -7,7 +7,7 @@ define amdgpu_cs void @mmo_offsets0(<4 x i32> addrspace(6)* inreg noalias derefe ; GCN: liveins: $sgpr0, $vgpr0 ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GCN: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr0 - ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 0 + ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[S_MOV_B32_]], %subreg.sub1 ; GCN: [[S_LOAD_DWORDX4_IMM:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM killed [[REG_SEQUENCE]], 0, 0, 0 :: (dereferenceable invariant load 16 from %ir.arg0, addrspace 6) ; GCN: [[BUFFER_LOAD_DWORDX4_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_]], 16, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from custom TargetCustom7 + 16, align 1, addrspace 4) @@ -57,27 +57,27 @@ define amdgpu_cs void @mmo_offsets0(<4 x i32> addrspace(6)* inreg noalias derefe ; GCN: BUFFER_ATOMIC_ADD_F32_IDXEN [[V_MOV_B32_e32_1]], [[COPY]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_]], 112, 0, implicit $exec :: (load store 4 on custom TargetCustom7, addrspace 4) ; GCN: INLINEASM &"", 1 ; GCN: [[BUFFER_LOAD_DWORDX4_OFFSET1:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_]], 128, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from custom TargetCustom7 + 128, align 1, addrspace 4) - ; GCN: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 64 + ; GCN: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 64 ; GCN: [[BUFFER_LOAD_DWORDX4_OFFSET2:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[S_LOAD_DWORDX4_IMM]], killed [[S_MOV_B32_1]], 64, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from custom TargetCustom7 + 128, align 1, addrspace 4) - ; GCN: [[S_MOV_B32_2:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 128 + ; GCN: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 128 ; GCN: [[BUFFER_LOAD_DWORDX4_OFFSET3:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_2]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from custom TargetCustom7 + 128, align 1, addrspace 4) ; GCN: 
[[BUFFER_LOAD_DWORDX4_OFFEN1:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_2]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from custom TargetCustom7, align 1, addrspace 4) ; GCN: [[COPY6:%[0-9]+]]:sreg_32 = COPY [[COPY]] ; GCN: [[BUFFER_LOAD_DWORDX4_OFFSET4:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[S_LOAD_DWORDX4_IMM]], [[COPY6]], 128, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from custom TargetCustom7, align 1, addrspace 4) ; GCN: INLINEASM &"", 1 ; GCN: [[BUFFER_LOAD_FORMAT_XYZW_OFFSET1:%[0-9]+]]:vreg_128 = BUFFER_LOAD_FORMAT_XYZW_OFFSET [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_]], 144, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from custom TargetCustom7 + 144, align 1, addrspace 4) - ; GCN: [[S_MOV_B32_3:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 72 + ; GCN: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 72 ; GCN: [[BUFFER_LOAD_FORMAT_XYZW_OFFSET2:%[0-9]+]]:vreg_128 = BUFFER_LOAD_FORMAT_XYZW_OFFSET [[S_LOAD_DWORDX4_IMM]], killed [[S_MOV_B32_3]], 72, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from custom TargetCustom7 + 144, align 1, addrspace 4) - ; GCN: [[S_MOV_B32_4:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 144 + ; GCN: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 144 ; GCN: [[BUFFER_LOAD_FORMAT_XYZW_OFFSET3:%[0-9]+]]:vreg_128 = BUFFER_LOAD_FORMAT_XYZW_OFFSET [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_4]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from custom TargetCustom7 + 144, align 1, addrspace 4) ; GCN: [[BUFFER_LOAD_FORMAT_XYZW_OFFEN1:%[0-9]+]]:vreg_128 = BUFFER_LOAD_FORMAT_XYZW_OFFEN [[COPY]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_4]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from custom TargetCustom7, align 1, addrspace 4) ; GCN: [[COPY7:%[0-9]+]]:sreg_32 = COPY [[COPY]] ; GCN: [[BUFFER_LOAD_FORMAT_XYZW_OFFSET4:%[0-9]+]]:vreg_128 = BUFFER_LOAD_FORMAT_XYZW_OFFSET [[S_LOAD_DWORDX4_IMM]], [[COPY7]], 144, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from custom TargetCustom7, align 1, addrspace 4) ; GCN: INLINEASM &"", 1 ; GCN: BUFFER_ATOMIC_ADD_OFFSET [[COPY]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_]], 160, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom TargetCustom7 + 160, align 1, addrspace 4) - ; GCN: [[S_MOV_B32_5:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 80 + ; GCN: [[S_MOV_B32_5:%[0-9]+]]:sreg_32 = S_MOV_B32 80 ; GCN: BUFFER_ATOMIC_ADD_OFFSET [[COPY]], [[S_LOAD_DWORDX4_IMM]], killed [[S_MOV_B32_5]], 80, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom TargetCustom7 + 160, align 1, addrspace 4) - ; GCN: [[S_MOV_B32_6:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 160 + ; GCN: [[S_MOV_B32_6:%[0-9]+]]:sreg_32 = S_MOV_B32 160 ; GCN: BUFFER_ATOMIC_ADD_OFFSET [[COPY]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_6]], 0, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom TargetCustom7 + 160, align 1, addrspace 4) ; GCN: BUFFER_ATOMIC_ADD_OFFEN [[COPY]], [[COPY]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_6]], 0, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom TargetCustom7, align 1, addrspace 4) ; GCN: [[COPY8:%[0-9]+]]:sreg_32 = COPY [[COPY]] @@ -86,11 +86,11 @@ define amdgpu_cs void @mmo_offsets0(<4 x i32> addrspace(6)* inreg noalias derefe ; GCN: [[DEF4:%[0-9]+]]:vreg_64 = IMPLICIT_DEF ; GCN: BUFFER_ATOMIC_CMPSWAP_OFFSET [[REG_SEQUENCE1]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_]], 176, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom TargetCustom7 + 176, align 1, addrspace 4) ; 
GCN: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[DEF4]].sub0 - ; GCN: [[S_MOV_B32_7:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 88 + ; GCN: [[S_MOV_B32_7:%[0-9]+]]:sreg_32 = S_MOV_B32 88 ; GCN: [[DEF5:%[0-9]+]]:vreg_64 = IMPLICIT_DEF ; GCN: BUFFER_ATOMIC_CMPSWAP_OFFSET [[REG_SEQUENCE1]], [[S_LOAD_DWORDX4_IMM]], killed [[S_MOV_B32_7]], 88, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom TargetCustom7 + 176, align 1, addrspace 4) ; GCN: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[DEF5]].sub0 - ; GCN: [[S_MOV_B32_8:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 176 + ; GCN: [[S_MOV_B32_8:%[0-9]+]]:sreg_32 = S_MOV_B32 176 ; GCN: [[DEF6:%[0-9]+]]:vreg_64 = IMPLICIT_DEF ; GCN: BUFFER_ATOMIC_CMPSWAP_OFFSET [[REG_SEQUENCE1]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_8]], 0, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom TargetCustom7 + 176, align 1, addrspace 4) ; GCN: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[DEF6]].sub0 @@ -103,18 +103,18 @@ define amdgpu_cs void @mmo_offsets0(<4 x i32> addrspace(6)* inreg noalias derefe ; GCN: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[DEF8]].sub0 ; GCN: INLINEASM &"", 1 ; GCN: BUFFER_STORE_DWORDX4_OFFSET_exact killed [[BUFFER_LOAD_DWORDX4_OFFSET1]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_]], 192, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 16 into custom TargetCustom7 + 192, align 1, addrspace 4) - ; GCN: [[S_MOV_B32_9:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 96 + ; GCN: [[S_MOV_B32_9:%[0-9]+]]:sreg_32 = S_MOV_B32 96 ; GCN: BUFFER_STORE_DWORDX4_OFFSET_exact killed [[BUFFER_LOAD_DWORDX4_OFFSET2]], [[S_LOAD_DWORDX4_IMM]], killed [[S_MOV_B32_9]], 96, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 16 into custom TargetCustom7 + 192, align 1, addrspace 4) - ; GCN: [[S_MOV_B32_10:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 192 + ; GCN: [[S_MOV_B32_10:%[0-9]+]]:sreg_32 = S_MOV_B32 192 ; GCN: BUFFER_STORE_DWORDX4_OFFSET_exact killed [[BUFFER_LOAD_DWORDX4_OFFSET3]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_10]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 16 into custom TargetCustom7 + 192, align 1, addrspace 4) ; GCN: BUFFER_STORE_DWORDX4_OFFEN_exact killed [[BUFFER_LOAD_DWORDX4_OFFEN1]], [[COPY]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_10]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 16 into custom TargetCustom7, align 1, addrspace 4) ; GCN: [[COPY15:%[0-9]+]]:sreg_32 = COPY [[COPY]] ; GCN: BUFFER_STORE_DWORDX4_OFFSET_exact killed [[BUFFER_LOAD_DWORDX4_OFFSET4]], [[S_LOAD_DWORDX4_IMM]], [[COPY15]], 192, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 16 into custom TargetCustom7, align 1, addrspace 4) ; GCN: INLINEASM &"", 1 ; GCN: BUFFER_STORE_FORMAT_XYZW_OFFSET_exact killed [[BUFFER_LOAD_FORMAT_XYZW_OFFSET1]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_]], 208, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 16 into custom TargetCustom7 + 208, align 1, addrspace 4) - ; GCN: [[S_MOV_B32_11:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 104 + ; GCN: [[S_MOV_B32_11:%[0-9]+]]:sreg_32 = S_MOV_B32 104 ; GCN: BUFFER_STORE_FORMAT_XYZW_OFFSET_exact killed [[BUFFER_LOAD_FORMAT_XYZW_OFFSET2]], [[S_LOAD_DWORDX4_IMM]], killed [[S_MOV_B32_11]], 104, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 16 into custom TargetCustom7 + 208, align 1, addrspace 4) - ; GCN: [[S_MOV_B32_12:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 208 + ; GCN: [[S_MOV_B32_12:%[0-9]+]]:sreg_32 = S_MOV_B32 208 ; GCN: BUFFER_STORE_FORMAT_XYZW_OFFSET_exact killed [[BUFFER_LOAD_FORMAT_XYZW_OFFSET3]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_12]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 16 
into custom TargetCustom7 + 208, align 1, addrspace 4) ; GCN: BUFFER_STORE_FORMAT_XYZW_OFFEN_exact killed [[BUFFER_LOAD_FORMAT_XYZW_OFFEN1]], [[COPY]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_12]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 16 into custom TargetCustom7, align 1, addrspace 4) ; GCN: [[COPY16:%[0-9]+]]:sreg_32 = COPY [[COPY]] @@ -122,10 +122,10 @@ define amdgpu_cs void @mmo_offsets0(<4 x i32> addrspace(6)* inreg noalias derefe ; GCN: INLINEASM &"", 1 ; GCN: [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] ; GCN: [[BUFFER_LOAD_DWORDX4_IDXEN2:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_IDXEN [[COPY17]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_]], 224, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from custom TargetCustom7 + 224, align 1, addrspace 4) - ; GCN: [[S_MOV_B32_13:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 112 + ; GCN: [[S_MOV_B32_13:%[0-9]+]]:sreg_32 = S_MOV_B32 112 ; GCN: [[COPY18:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] ; GCN: [[BUFFER_LOAD_DWORDX4_IDXEN3:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_IDXEN [[COPY18]], [[S_LOAD_DWORDX4_IMM]], killed [[S_MOV_B32_13]], 112, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from custom TargetCustom7 + 224, align 1, addrspace 4) - ; GCN: [[S_MOV_B32_14:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 224 + ; GCN: [[S_MOV_B32_14:%[0-9]+]]:sreg_32 = S_MOV_B32 224 ; GCN: [[COPY19:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] ; GCN: [[BUFFER_LOAD_DWORDX4_IDXEN4:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_IDXEN [[COPY19]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_14]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from custom TargetCustom7 + 224, align 1, addrspace 4) ; GCN: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[COPY]], %subreg.sub1 @@ -138,10 +138,10 @@ define amdgpu_cs void @mmo_offsets0(<4 x i32> addrspace(6)* inreg noalias derefe ; GCN: INLINEASM &"", 1 ; GCN: [[COPY22:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] ; GCN: [[BUFFER_LOAD_FORMAT_XYZW_IDXEN2:%[0-9]+]]:vreg_128 = BUFFER_LOAD_FORMAT_XYZW_IDXEN [[COPY22]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_]], 240, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from custom TargetCustom7 + 240, align 1, addrspace 4) - ; GCN: [[S_MOV_B32_15:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 120 + ; GCN: [[S_MOV_B32_15:%[0-9]+]]:sreg_32 = S_MOV_B32 120 ; GCN: [[COPY23:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] ; GCN: [[BUFFER_LOAD_FORMAT_XYZW_IDXEN3:%[0-9]+]]:vreg_128 = BUFFER_LOAD_FORMAT_XYZW_IDXEN [[COPY23]], [[S_LOAD_DWORDX4_IMM]], killed [[S_MOV_B32_15]], 120, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from custom TargetCustom7 + 240, align 1, addrspace 4) - ; GCN: [[S_MOV_B32_16:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 240 + ; GCN: [[S_MOV_B32_16:%[0-9]+]]:sreg_32 = S_MOV_B32 240 ; GCN: [[COPY24:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] ; GCN: [[BUFFER_LOAD_FORMAT_XYZW_IDXEN4:%[0-9]+]]:vreg_128 = BUFFER_LOAD_FORMAT_XYZW_IDXEN [[COPY24]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_16]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from custom TargetCustom7 + 240, align 1, addrspace 4) ; GCN: [[BUFFER_LOAD_FORMAT_XYZW_BOTHEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_FORMAT_XYZW_BOTHEN [[REG_SEQUENCE2]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_16]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from custom TargetCustom7, align 1, addrspace 4) @@ -155,7 +155,7 @@ define amdgpu_cs void @mmo_offsets0(<4 x i32> addrspace(6)* inreg noalias derefe ; GCN: BUFFER_ATOMIC_ADD_IDXEN [[COPY]], [[COPY27]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_]], 
256, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom TargetCustom7 + 256, align 1, addrspace 4) ; GCN: [[COPY28:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] ; GCN: BUFFER_ATOMIC_ADD_IDXEN [[COPY]], [[COPY28]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_2]], 128, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom TargetCustom7 + 256, align 1, addrspace 4) - ; GCN: [[S_MOV_B32_17:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 256 + ; GCN: [[S_MOV_B32_17:%[0-9]+]]:sreg_32 = S_MOV_B32 256 ; GCN: [[COPY29:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] ; GCN: BUFFER_ATOMIC_ADD_IDXEN [[COPY]], [[COPY29]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_17]], 0, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom TargetCustom7 + 256, align 1, addrspace 4) ; GCN: BUFFER_ATOMIC_ADD_BOTHEN [[COPY]], [[REG_SEQUENCE2]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_17]], 0, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom TargetCustom7, align 1, addrspace 4) @@ -169,12 +169,12 @@ define amdgpu_cs void @mmo_offsets0(<4 x i32> addrspace(6)* inreg noalias derefe ; GCN: [[DEF9:%[0-9]+]]:vreg_64 = IMPLICIT_DEF ; GCN: BUFFER_ATOMIC_CMPSWAP_IDXEN [[REG_SEQUENCE1]], [[COPY32]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_]], 272, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom TargetCustom7 + 272, align 1, addrspace 4) ; GCN: [[COPY33:%[0-9]+]]:vgpr_32 = COPY [[DEF9]].sub0 - ; GCN: [[S_MOV_B32_18:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 136 + ; GCN: [[S_MOV_B32_18:%[0-9]+]]:sreg_32 = S_MOV_B32 136 ; GCN: [[COPY34:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] ; GCN: [[DEF10:%[0-9]+]]:vreg_64 = IMPLICIT_DEF ; GCN: BUFFER_ATOMIC_CMPSWAP_IDXEN [[REG_SEQUENCE1]], [[COPY34]], [[S_LOAD_DWORDX4_IMM]], killed [[S_MOV_B32_18]], 136, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom TargetCustom7 + 272, align 1, addrspace 4) ; GCN: [[COPY35:%[0-9]+]]:vgpr_32 = COPY [[DEF10]].sub0 - ; GCN: [[S_MOV_B32_19:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 272 + ; GCN: [[S_MOV_B32_19:%[0-9]+]]:sreg_32 = S_MOV_B32 272 ; GCN: [[COPY36:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] ; GCN: [[DEF11:%[0-9]+]]:vreg_64 = IMPLICIT_DEF ; GCN: BUFFER_ATOMIC_CMPSWAP_IDXEN [[REG_SEQUENCE1]], [[COPY36]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_19]], 0, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom TargetCustom7 + 272, align 1, addrspace 4) @@ -198,7 +198,7 @@ define amdgpu_cs void @mmo_offsets0(<4 x i32> addrspace(6)* inreg noalias derefe ; GCN: BUFFER_STORE_DWORDX4_IDXEN_exact killed [[BUFFER_LOAD_DWORDX4_IDXEN2]], [[COPY44]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_]], 288, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 16 into custom TargetCustom7 + 288, align 1, addrspace 4) ; GCN: [[COPY45:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] ; GCN: BUFFER_STORE_DWORDX4_IDXEN_exact killed [[BUFFER_LOAD_DWORDX4_IDXEN3]], [[COPY45]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_4]], 144, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 16 into custom TargetCustom7 + 288, align 1, addrspace 4) - ; GCN: [[S_MOV_B32_20:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 288 + ; GCN: [[S_MOV_B32_20:%[0-9]+]]:sreg_32 = S_MOV_B32 288 ; GCN: [[COPY46:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] ; GCN: BUFFER_STORE_DWORDX4_IDXEN_exact killed [[BUFFER_LOAD_DWORDX4_IDXEN4]], [[COPY46]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_20]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 16 into custom TargetCustom7 + 288, align 1, addrspace 4) ; GCN: BUFFER_STORE_DWORDX4_BOTHEN_exact killed [[BUFFER_LOAD_DWORDX4_BOTHEN]], 
[[REG_SEQUENCE2]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_20]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 16 into custom TargetCustom7, align 1, addrspace 4) @@ -210,10 +210,10 @@ define amdgpu_cs void @mmo_offsets0(<4 x i32> addrspace(6)* inreg noalias derefe ; GCN: INLINEASM &"", 1 ; GCN: [[COPY49:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] ; GCN: BUFFER_STORE_FORMAT_XYZW_IDXEN_exact killed [[BUFFER_LOAD_FORMAT_XYZW_IDXEN2]], [[COPY49]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_]], 304, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 16 into custom TargetCustom7 + 304, align 1, addrspace 4) - ; GCN: [[S_MOV_B32_21:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 152 + ; GCN: [[S_MOV_B32_21:%[0-9]+]]:sreg_32 = S_MOV_B32 152 ; GCN: [[COPY50:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] ; GCN: BUFFER_STORE_FORMAT_XYZW_IDXEN_exact killed [[BUFFER_LOAD_FORMAT_XYZW_IDXEN3]], [[COPY50]], [[S_LOAD_DWORDX4_IMM]], killed [[S_MOV_B32_21]], 152, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 16 into custom TargetCustom7 + 304, align 1, addrspace 4) - ; GCN: [[S_MOV_B32_22:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 304 + ; GCN: [[S_MOV_B32_22:%[0-9]+]]:sreg_32 = S_MOV_B32 304 ; GCN: [[COPY51:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] ; GCN: BUFFER_STORE_FORMAT_XYZW_IDXEN_exact killed [[BUFFER_LOAD_FORMAT_XYZW_IDXEN4]], [[COPY51]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_22]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 16 into custom TargetCustom7 + 304, align 1, addrspace 4) ; GCN: BUFFER_STORE_FORMAT_XYZW_BOTHEN_exact killed [[BUFFER_LOAD_FORMAT_XYZW_BOTHEN]], [[REG_SEQUENCE2]], [[S_LOAD_DWORDX4_IMM]], [[S_MOV_B32_22]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 16 into custom TargetCustom7, align 1, addrspace 4) diff --git a/llvm/test/CodeGen/AMDGPU/extract_subvector_vec4_vec3.ll b/llvm/test/CodeGen/AMDGPU/extract_subvector_vec4_vec3.ll index 91afdab..0c26425 100644 --- a/llvm/test/CodeGen/AMDGPU/extract_subvector_vec4_vec3.ll +++ b/llvm/test/CodeGen/AMDGPU/extract_subvector_vec4_vec3.ll @@ -8,8 +8,8 @@ define amdgpu_hs void @main([0 x i8] addrspace(6)* inreg %arg) { ; GCN-LABEL: name: main ; GCN: bb.0.main_body: - ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 0 - ; GCN: [[DEF:%[0-9]+]]:sreg_32_xm0 = IMPLICIT_DEF + ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GCN: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[DEF]] ; GCN: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF ; GCN: [[BUFFER_LOAD_DWORDX4_OFFEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY]], [[DEF1]], [[S_MOV_B32_]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from custom TargetCustom7, align 1, addrspace 4) @@ -18,7 +18,7 @@ define amdgpu_hs void @main([0 x i8] addrspace(6)* inreg %arg) { ; GCN: [[COPY3:%[0-9]+]]:sgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_OFFEN]].sub0 ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_96 = REG_SEQUENCE killed [[COPY3]], %subreg.sub0, killed [[COPY2]], %subreg.sub1, killed [[COPY1]], %subreg.sub2 ; GCN: [[COPY4:%[0-9]+]]:vreg_96 = COPY [[REG_SEQUENCE]] - ; GCN: [[DEF2:%[0-9]+]]:sreg_32_xm0 = IMPLICIT_DEF + ; GCN: [[DEF2:%[0-9]+]]:sreg_32 = IMPLICIT_DEF ; GCN: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[DEF2]] ; GCN: [[DEF3:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF ; GCN: BUFFER_STORE_DWORDX3_OFFEN_exact killed [[COPY4]], [[COPY5]], [[DEF3]], [[S_MOV_B32_]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 12 into custom TargetCustom7, align 1, addrspace 4) @@ -34,4 +34,3 @@ main_body: declare void @llvm.amdgcn.raw.buffer.store.v3i32(<3 x i32>, 
<4 x i32>, i32, i32, i32 immarg) declare <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32>, i32, i32, i32 immarg) - diff --git a/llvm/test/CodeGen/AMDGPU/inline-constraints.ll b/llvm/test/CodeGen/AMDGPU/inline-constraints.ll index 8a4af42..6f1d355 100644 --- a/llvm/test/CodeGen/AMDGPU/inline-constraints.ll +++ b/llvm/test/CodeGen/AMDGPU/inline-constraints.ll @@ -30,11 +30,10 @@ entry: ret void } -; FIXME: Should be able to avoid copy ; GCN-LABEL: {{^}}inline_sreg_constraint_m0: ; GCN: s_mov_b32 m0, -1 -; GCN: s_mov_b32 [[COPY_M0:s[0-9]+]], m0 -; GCN: ; use [[COPY_M0]] +; GCN-NOT: m0 +; GCN: ; use m0 define amdgpu_kernel void @inline_sreg_constraint_m0() { %m0 = tail call i32 asm sideeffect "s_mov_b32 m0, -1", "={m0}"() tail call void asm sideeffect "; use $0", "s"(i32 %m0) diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll index f28f40c..991afdb 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll @@ -30,11 +30,9 @@ define amdgpu_kernel void @test_readfirstlane_imm_fold(i32 addrspace(1)* %out) # ret void } -; TODO: m0 should be folded. ; CHECK-LABEL: {{^}}test_readfirstlane_m0: ; CHECK: s_mov_b32 m0, -1 -; CHECK: s_mov_b32 [[COPY_M0:s[0-9]+]], m0 -; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]]], [[COPY_M0]] +; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]]], m0 ; CHECK: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[VVAL]] define amdgpu_kernel void @test_readfirstlane_m0(i32 addrspace(1)* %out) #1 { %m0 = call i32 asm "s_mov_b32 m0, -1", "={m0}"() diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll index f4f48d3..230d7af 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll @@ -44,8 +44,7 @@ define amdgpu_kernel void @test_readlane_vregs(i32 addrspace(1)* %out, <2 x i32> ; TODO: m0 should be folded. 
 ; CHECK-LABEL: {{^}}test_readlane_m0_sreg:
 ; CHECK: s_mov_b32 m0, -1
-; CHECK: s_mov_b32 [[COPY_M0:s[0-9]+]], m0
-; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]]], [[COPY_M0]]
+; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]]], m0
 ; CHECK: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[VVAL]]
 define amdgpu_kernel void @test_readlane_m0_sreg(i32 addrspace(1)* %out, i32 %src1) #1 {
 %m0 = call i32 asm "s_mov_b32 m0, -1", "={m0}"()
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll
index f7f1fd5..fd93fbc 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll
@@ -39,9 +39,9 @@ define amdgpu_kernel void @test_writelane_vreg_lane(i32 addrspace(1)* %out, <2 x

 ; CHECK-LABEL: {{^}}test_writelane_m0_sreg:
 ; CHECK: s_mov_b32 m0, -1
-; CHECK: s_mov_b32 [[COPY_M0:s[0-9]+]], m0
+; CIGFX9: s_mov_b32 [[COPY_M0:s[0-9]+]], m0
 ; CIGFX9: v_writelane_b32 v{{[0-9]+}}, [[COPY_M0]], m0
-; GFX10: v_writelane_b32 v{{[0-9]+}}, [[COPY_M0]], s{{[0-9]+}}
+; GFX10: v_writelane_b32 v{{[0-9]+}}, m0, s{{[0-9]+}}
 define amdgpu_kernel void @test_writelane_m0_sreg(i32 addrspace(1)* %out, i32 %src1) #1 {
 %oldval = load i32, i32 addrspace(1)* %out
 %m0 = call i32 asm "s_mov_b32 m0, -1", "={m0}"()
diff --git a/llvm/test/CodeGen/AMDGPU/read_register.ll b/llvm/test/CodeGen/AMDGPU/read_register.ll
index 8fe9e7f..9a8c468 100644
--- a/llvm/test/CodeGen/AMDGPU/read_register.ll
+++ b/llvm/test/CodeGen/AMDGPU/read_register.ll
@@ -3,11 +3,9 @@
 declare i32 @llvm.read_register.i32(metadata) #0
 declare i64 @llvm.read_register.i64(metadata) #0

-; FIXME: Should be able to eliminate copy
 ; CHECK-LABEL: {{^}}test_read_m0:
 ; CHECK: s_mov_b32 m0, -1
-; CHECK: s_mov_b32 [[COPY_M0:s[0-9]+]], m0
-; CHECK: v_mov_b32_e32 [[COPY:v[0-9]+]], [[COPY_M0]]
+; CHECK: v_mov_b32_e32 [[COPY:v[0-9]+]], m0
 ; CHECK: buffer_store_dword [[COPY]]
 define amdgpu_kernel void @test_read_m0(i32 addrspace(1)* %out) #0 {
 store volatile i32 0, i32 addrspace(3)* undef
-- 
2.7.4