From: Amara Emerson
Date: Fri, 30 Apr 2021 00:04:20 +0000 (-0700)
Subject: [AArch64][GlobalISel] Use a single MachineIRBuilder for most of isel. NFC.
X-Git-Tag: llvmorg-14-init~7911
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=7d2562c2daad83adadf14f3719cf7199c5bfd9cf;p=platform%2Fupstream%2Fllvm.git

[AArch64][GlobalISel] Use a single MachineIRBuilder for most of isel. NFC.

This is a long overdue cleanup. Not every use is eliminated; I stuck to uses
that were directly being called from select(), and not the render functions.
(A minimal sketch of the resulting pattern appears after the diff.)

Differential Revision: https://reviews.llvm.org/D101590
---

diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index 047f3b6..de44949 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -74,6 +74,7 @@ public:
                CodeGenCoverage &CoverageInfo, ProfileSummaryInfo *PSI,
                BlockFrequencyInfo *BFI) override {
     InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
+    MIB.setMF(MF);

     // hasFnAttribute() is expensive to call on every BRCOND selection, so
     // cache it here for each run of the selector.
@@ -94,12 +95,12 @@ private:
   bool preISelLower(MachineInstr &I);

   // An early selection function that runs before the selectImpl() call.
-  bool earlySelect(MachineInstr &I) const;
+  bool earlySelect(MachineInstr &I);

   // Do some preprocessing of G_PHIs before we begin selection.
   void processPHIs(MachineFunction &MF);

-  bool earlySelectSHL(MachineInstr &I, MachineRegisterInfo &MRI) const;
+  bool earlySelectSHL(MachineInstr &I, MachineRegisterInfo &MRI);

   /// Eliminate same-sized cross-bank copies into stores before selectImpl().
   bool contractCrossBankCopyIntoStore(MachineInstr &I,
@@ -126,10 +127,10 @@ private:
   ///@}

   bool selectCompareBranch(MachineInstr &I, MachineFunction &MF,
-                           MachineRegisterInfo &MRI) const;
+                           MachineRegisterInfo &MRI);

-  bool selectVectorAshrLshr(MachineInstr &I, MachineRegisterInfo &MRI) const;
-  bool selectVectorSHL(MachineInstr &I, MachineRegisterInfo &MRI) const;
+  bool selectVectorAshrLshr(MachineInstr &I, MachineRegisterInfo &MRI);
+  bool selectVectorSHL(MachineInstr &I, MachineRegisterInfo &MRI);

   // Helper to generate an equivalent of scalar_to_vector into a new register,
   // returned via 'Dst'.
@@ -156,30 +157,29 @@ private:
   /// otherwise.
   MachineInstr *emitConstantVector(Register Dst, Constant *CV,
                                    MachineIRBuilder &MIRBuilder,
-                                   MachineRegisterInfo &MRI) const;
+                                   MachineRegisterInfo &MRI);

-  bool selectInsertElt(MachineInstr &I, MachineRegisterInfo &MRI) const;
+  bool selectInsertElt(MachineInstr &I, MachineRegisterInfo &MRI);
   bool tryOptConstantBuildVec(MachineInstr &MI, LLT DstTy,
-                              MachineRegisterInfo &MRI) const;
-  bool selectBuildVector(MachineInstr &I, MachineRegisterInfo &MRI) const;
-  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI) const;
-  bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI) const;
-
-  bool selectShuffleVector(MachineInstr &I, MachineRegisterInfo &MRI) const;
-  bool selectExtractElt(MachineInstr &I, MachineRegisterInfo &MRI) const;
-  bool selectConcatVectors(MachineInstr &I, MachineRegisterInfo &MRI) const;
-  bool selectSplitVectorUnmerge(MachineInstr &I,
-                                MachineRegisterInfo &MRI) const;
+                              MachineRegisterInfo &MRI);
+  bool selectBuildVector(MachineInstr &I, MachineRegisterInfo &MRI);
+  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI);
+  bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI);
+
+  bool selectShuffleVector(MachineInstr &I, MachineRegisterInfo &MRI);
+  bool selectExtractElt(MachineInstr &I, MachineRegisterInfo &MRI);
+  bool selectConcatVectors(MachineInstr &I, MachineRegisterInfo &MRI);
+  bool selectSplitVectorUnmerge(MachineInstr &I, MachineRegisterInfo &MRI);
   bool selectIntrinsicWithSideEffects(MachineInstr &I,
-                                      MachineRegisterInfo &MRI) const;
+                                      MachineRegisterInfo &MRI);
   bool selectIntrinsic(MachineInstr &I, MachineRegisterInfo &MRI);
-  bool selectVectorICmp(MachineInstr &I, MachineRegisterInfo &MRI) const;
+  bool selectVectorICmp(MachineInstr &I, MachineRegisterInfo &MRI);
   bool selectIntrinsicTrunc(MachineInstr &I, MachineRegisterInfo &MRI) const;
   bool selectIntrinsicRound(MachineInstr &I, MachineRegisterInfo &MRI) const;
-  bool selectJumpTable(MachineInstr &I, MachineRegisterInfo &MRI) const;
-  bool selectBrJT(MachineInstr &I, MachineRegisterInfo &MRI) const;
-  bool selectTLSGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI) const;
-  bool selectReduction(MachineInstr &I, MachineRegisterInfo &MRI) const;
+  bool selectJumpTable(MachineInstr &I, MachineRegisterInfo &MRI);
+  bool selectBrJT(MachineInstr &I, MachineRegisterInfo &MRI);
+  bool selectTLSGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI);
+  bool selectReduction(MachineInstr &I, MachineRegisterInfo &MRI);

   unsigned emitConstantPoolEntry(const Constant *CPVal,
                                  MachineFunction &MF) const;
@@ -414,11 +414,10 @@ private:
                          int OpIdx = -1) const;

   // Materialize a GlobalValue or BlockAddress using a movz+movk sequence.
-  void materializeLargeCMVal(MachineInstr &I, const Value *V,
-                             unsigned OpFlags) const;
+  void materializeLargeCMVal(MachineInstr &I, const Value *V, unsigned OpFlags);

   // Optimization methods.
-  bool tryOptSelect(MachineInstr &MI) const;
+  bool tryOptSelect(MachineInstr &MI);
   MachineInstr *tryFoldIntegerCompare(MachineOperand &LHS, MachineOperand &RHS,
                                       MachineOperand &Predicate,
                                       MachineIRBuilder &MIRBuilder) const;
@@ -444,6 +443,8 @@ private:
   // clobbered by calls.
   Register MFReturnAddr;

+  MachineIRBuilder MIB;
+
 #define GET_GLOBALISEL_PREDICATES_DECL
 #include "AArch64GenGlobalISel.inc"
 #undef GET_GLOBALISEL_PREDICATES_DECL
@@ -1627,7 +1628,7 @@ bool AArch64InstructionSelector::selectCompareBranchFedByICmp(
 }

 bool AArch64InstructionSelector::selectCompareBranch(
-    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
+    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) {
   Register CondReg = I.getOperand(0).getReg();
   MachineInstr *CCMI = MRI.getVRegDef(CondReg);
   if (CCMI->getOpcode() == TargetOpcode::G_TRUNC) {
@@ -1637,7 +1638,6 @@ bool AArch64InstructionSelector::selectCompareBranch(

   // Try to select the G_BRCOND using whatever is feeding the condition if
   // possible.
-  MachineIRBuilder MIB(I);
   unsigned CCMIOpc = CCMI->getOpcode();
   if (CCMIOpc == TargetOpcode::G_FCMP)
     return selectCompareBranchFedByFCmp(I, *CCMI, MIB);
@@ -1709,8 +1709,8 @@ static Optional<int64_t> getVectorSHLImm(LLT SrcTy, Register Reg, MachineRegiste
   return Imm;
 }

-bool AArch64InstructionSelector::selectVectorSHL(
-    MachineInstr &I, MachineRegisterInfo &MRI) const {
+bool AArch64InstructionSelector::selectVectorSHL(MachineInstr &I,
+                                                 MachineRegisterInfo &MRI) {
   assert(I.getOpcode() == TargetOpcode::G_SHL);
   Register DstReg = I.getOperand(0).getReg();
   const LLT Ty = MRI.getType(DstReg);
@@ -1744,7 +1744,6 @@ bool AArch64InstructionSelector::selectVectorSHL(
     return false;
   }

-  MachineIRBuilder MIB(I);
   auto Shl = MIB.buildInstr(Opc, {DstReg}, {Src1Reg});
   if (ImmVal)
     Shl.addImm(*ImmVal);
@@ -1756,7 +1755,7 @@
 }

 bool AArch64InstructionSelector::selectVectorAshrLshr(
-    MachineInstr &I, MachineRegisterInfo &MRI) const {
+    MachineInstr &I, MachineRegisterInfo &MRI) {
   assert(I.getOpcode() == TargetOpcode::G_ASHR ||
          I.getOpcode() == TargetOpcode::G_LSHR);
   Register DstReg = I.getOperand(0).getReg();
@@ -1806,7 +1805,6 @@ bool AArch64InstructionSelector::selectVectorAshrLshr(
     return false;
   }

-  MachineIRBuilder MIB(I);
   auto Neg = MIB.buildInstr(NegOpc, {RC}, {Src2Reg});
   constrainSelectedInstRegOperands(*Neg, TII, TRI, RBI);
   auto SShl = MIB.buildInstr(Opc, {DstReg}, {Src1Reg, Neg});
@@ -1848,11 +1846,10 @@ bool AArch64InstructionSelector::selectVaStartDarwin(
 }

 void AArch64InstructionSelector::materializeLargeCMVal(
-    MachineInstr &I, const Value *V, unsigned OpFlags) const {
+    MachineInstr &I, const Value *V, unsigned OpFlags) {
   MachineBasicBlock &MBB = *I.getParent();
   MachineFunction &MF = *MBB.getParent();
   MachineRegisterInfo &MRI = MF.getRegInfo();
-  MachineIRBuilder MIB(I);

   auto MovZ = MIB.buildInstr(AArch64::MOVZXi, {&AArch64::GPR64RegClass}, {});
   MovZ->addOperand(MF, I.getOperand(1));
@@ -1913,7 +1910,6 @@ bool AArch64InstructionSelector::preISelLower(MachineInstr &I) {
     assert(AmtMI && "could not find a vreg definition for shift amount");
     if (AmtMI->getOpcode() != TargetOpcode::G_CONSTANT) {
       // Insert a subregister copy to implement a 64->32 trunc
-      MachineIRBuilder MIB(I);
       auto Trunc = MIB.buildInstr(TargetOpcode::COPY, {SrcTy}, {})
                        .addReg(ShiftReg, 0, AArch64::sub_32);
       MRI.setRegBank(Trunc.getReg(0), RBI.getRegBank(AArch64::GPRRegBankID));
@@ -1928,7 +1924,6 @@ bool AArch64InstructionSelector::preISelLower(MachineInstr &I) {
     // Allow matching with imported patterns for stores of pointers. Unlike
     // G_LOAD/G_PTR_ADD, we may not have selected all users. So, emit a copy
     // and constrain.
-      MachineIRBuilder MIB(I);
       auto Copy = MIB.buildCopy(LLT::scalar(64), SrcOp);
       Register NewSrc = Copy.getReg(0);
       SrcOp.setReg(NewSrc);
@@ -1956,7 +1951,6 @@ bool AArch64InstructionSelector::preISelLower(MachineInstr &I) {
     LLT DstTy = MRI.getType(I.getOperand(0).getReg());
     if (!DstTy.getElementType().isPointer())
       return false;
-    MachineIRBuilder MIB(I);
     auto NewSrc = MIB.buildCopy(LLT::scalar(64), I.getOperand(1).getReg());
     MRI.setType(I.getOperand(0).getReg(),
                 DstTy.changeElementType(LLT::scalar(64)));
@@ -2007,7 +2001,6 @@ bool AArch64InstructionSelector::convertPtrAddToAdd(
   if (PtrTy.getAddressSpace() != 0)
     return false;

-  MachineIRBuilder MIB(I);
   const LLT CastPtrTy = PtrTy.isVector() ? LLT::vector(2, 64) : LLT::scalar(64);
   auto PtrToInt = MIB.buildPtrToInt(CastPtrTy, AddOp1Reg);
   // Set regbanks on the registers.
@@ -2036,8 +2029,8 @@ bool AArch64InstructionSelector::convertPtrAddToAdd(
   return true;
 }

-bool AArch64InstructionSelector::earlySelectSHL(
-    MachineInstr &I, MachineRegisterInfo &MRI) const {
+bool AArch64InstructionSelector::earlySelectSHL(MachineInstr &I,
+                                                MachineRegisterInfo &MRI) {
   // We try to match the immediate variant of LSL, which is actually an alias
   // for a special case of UBFM. Otherwise, we fall back to the imported
   // selector which will match the register variant.
@@ -2053,7 +2046,6 @@ bool AArch64InstructionSelector::earlySelectSHL(
   bool Is64Bit = DstTy.getSizeInBits() == 64;
   auto Imm1Fn = Is64Bit ? selectShiftA_64(MO) : selectShiftA_32(MO);
   auto Imm2Fn = Is64Bit ? selectShiftB_64(MO) : selectShiftB_32(MO);
-  MachineIRBuilder MIB(I);

   if (!Imm1Fn || !Imm2Fn)
     return false;
@@ -2113,7 +2105,7 @@ bool AArch64InstructionSelector::contractCrossBankCopyIntoStore(
   return true;
 }

-bool AArch64InstructionSelector::earlySelect(MachineInstr &I) const {
+bool AArch64InstructionSelector::earlySelect(MachineInstr &I) {
   assert(I.getParent() && "Instruction should be in a basic block!");
   assert(I.getParent()->getParent() && "Instruction should be in a function!");

@@ -2135,8 +2127,7 @@ bool AArch64InstructionSelector::earlySelect(MachineInstr &I) const {
         MRI.getType(Dst).getNumElements(),
         ConstantInt::get(Type::getIntNTy(Ctx, MRI.getType(Src).getSizeInBits()),
                          ValAndVReg->Value));
-    MachineIRBuilder MIRBuilder(I);
-    if (!emitConstantVector(Dst, CV, MIRBuilder, MRI))
+    if (!emitConstantVector(Dst, CV, MIB, MRI))
       return false;
     I.eraseFromParent();
     return true;
@@ -2204,12 +2195,11 @@ bool AArch64InstructionSelector::earlySelect(MachineInstr &I) const {
       if (!Cmp)
         return false;
     }
-    MachineIRBuilder MIRBuilder(I);
     auto Pred =
        static_cast<CmpInst::Predicate>(Cmp->getOperand(1).getPredicate());
     emitIntegerCompare(Cmp->getOperand(2), Cmp->getOperand(3),
-                       Cmp->getOperand(1), MIRBuilder);
-    emitCSetForICMP(I.getOperand(0).getReg(), Pred, MIRBuilder, X);
+                       Cmp->getOperand(1), MIB);
+    emitCSetForICMP(I.getOperand(0).getReg(), Pred, MIB, X);
     I.eraseFromParent();
     return true;
   }
@@ -2234,6 +2224,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
     return false;
   }

+  MIB.setInstrAndDebugLoc(I);
+
   unsigned Opcode = I.getOpcode();
   // G_PHI requires same handling as PHI
   if (!I.isPreISelOpcode() || Opcode == TargetOpcode::G_PHI) {
@@ -2303,8 +2295,6 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
   LLT Ty = I.getOperand(0).isReg() ?
                MRI.getType(I.getOperand(0).getReg()) : LLT{};

-  MachineIRBuilder MIB(I);
-
   switch (Opcode) {
   case TargetOpcode::G_SBFX:
   case TargetOpcode::G_UBFX: {
@@ -2322,7 +2312,6 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
     assert(Cst2 && "Should have gotten a constant for src 2?");
     auto LSB = Cst1->Value.getZExtValue();
     auto Width = Cst2->Value.getZExtValue();
-    MachineIRBuilder MIB(I);
     auto BitfieldInst =
         MIB.buildInstr(Opc, {I.getOperand(0)}, {I.getOperand(1)})
             .addImm(LSB)
             .addImm(Width);
@@ -2354,7 +2343,6 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
     }
     assert(TM.getCodeModel() == CodeModel::Small &&
            "Expected small code model");
-    MachineIRBuilder MIB(I);
     auto Op1 = BaseMI->getOperand(1);
     auto Op2 = I.getOperand(2);
     auto MovAddr = MIB.buildInstr(AArch64::MOVaddr, {I.getOperand(0)}, {})
@@ -2476,7 +2464,6 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
     if (DefSize == 64 || DefSize == 128 ||
         (DefSize == 32 && shouldOptForSize(&MF))) {
       auto *FPImm = I.getOperand(1).getFPImm();
-      MachineIRBuilder MIB(I);
       auto *LoadMI = emitLoadFromConstantPool(FPImm, MIB);
       if (!LoadMI) {
         LLVM_DEBUG(dbgs() << "Failed to load double constant pool entry\n");
@@ -2545,7 +2532,6 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
     if (Offset % 64 != 0)
       return false;
     unsigned LaneIdx = Offset / 64;
-    MachineIRBuilder MIB(I);
     MachineInstr *Extract = emitExtractVectorElt(
         DstReg, DstRB, LLT::scalar(64), SrcReg, LaneIdx, MIB);
     if (!Extract)
@@ -2656,8 +2642,6 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
   case TargetOpcode::G_LOAD:
   case TargetOpcode::G_STORE: {
     bool IsZExtLoad = I.getOpcode() == TargetOpcode::G_ZEXTLOAD;
-    MachineIRBuilder MIB(I);
-
     LLT PtrTy = MRI.getType(I.getOperand(1).getReg());

     if (PtrTy != LLT::pointer(0, 64)) {
@@ -2829,9 +2813,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
   }

   case TargetOpcode::G_PTR_ADD: {
-    MachineIRBuilder MIRBuilder(I);
-    emitADD(I.getOperand(0).getReg(), I.getOperand(1), I.getOperand(2),
-            MIRBuilder);
+    emitADD(I.getOperand(0).getReg(), I.getOperand(1), I.getOperand(2), MIB);
     I.eraseFromParent();
     return true;
   }
@@ -2840,18 +2822,16 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
   case TargetOpcode::G_SSUBO:
   case TargetOpcode::G_USUBO: {
     // Emit the operation and get the correct condition code.
-    MachineIRBuilder MIRBuilder(I);
     auto OpAndCC = emitOverflowOp(Opcode, I.getOperand(0).getReg(),
-                                  I.getOperand(2), I.getOperand(3), MIRBuilder);
+                                  I.getOperand(2), I.getOperand(3), MIB);

     // Now, put the overflow result in the register given by the first operand
     // to the overflow op. CSINC increments the result when the predicate is
     // false, so to get the increment when it's true, we need to use the
     // inverse. In this case, we want to increment when carry is set.
     Register ZReg = AArch64::WZR;
-    auto CsetMI = MIRBuilder
-                      .buildInstr(AArch64::CSINCWr, {I.getOperand(1).getReg()},
-                                  {ZReg, ZReg})
+    auto CsetMI = MIB.buildInstr(AArch64::CSINCWr, {I.getOperand(1).getReg()},
+                                 {ZReg, ZReg})
                       .addImm(getInvertedCondCode(OpAndCC.second));
     constrainSelectedInstRegOperands(*CsetMI, TII, TRI, RBI);
     I.eraseFromParent();
@@ -2931,7 +2911,6 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
     }

     if (!SrcTy.isVector() && SrcTy.getSizeInBits() == 128) {
-      MachineIRBuilder MIB(I);
       MachineInstr *Extract = emitExtractVectorElt(
           DstReg, DstRB, LLT::scalar(DstTy.getSizeInBits()), SrcReg, 0, MIB);
       if (!Extract)
@@ -3019,7 +2998,6 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
                AArch64::GPRRegBankID &&
            "Unexpected ext regbank");

-    MachineIRBuilder MIB(I);
     MachineInstr *ExtI;

     // First check if we're extending the result of a load which has a dest type
@@ -3153,7 +3131,6 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {

     // Make sure to use an unused vreg instead of wzr, so that the peephole
     // optimizations will be able to optimize these.
-    MachineIRBuilder MIB(I);
     Register DeadVReg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
     auto TstMI = MIB.buildInstr(AArch64::ANDSWri, {DeadVReg}, {CondReg})
                      .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
@@ -3173,22 +3150,20 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
       return false;
     }

-    MachineIRBuilder MIRBuilder(I);
     auto Pred =
        static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());
     emitIntegerCompare(I.getOperand(2), I.getOperand(3), I.getOperand(1),
-                       MIRBuilder);
-    emitCSetForICMP(I.getOperand(0).getReg(), Pred, MIRBuilder);
+                       MIB);
+    emitCSetForICMP(I.getOperand(0).getReg(), Pred, MIB);
     I.eraseFromParent();
     return true;
   }

   case TargetOpcode::G_FCMP: {
-    MachineIRBuilder MIRBuilder(I);
     CmpInst::Predicate Pred =
        static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());
-    if (!emitFPCompare(I.getOperand(2).getReg(), I.getOperand(3).getReg(),
-                       MIRBuilder, Pred) ||
-        !emitCSetForFCmp(I.getOperand(0).getReg(), Pred, MIRBuilder))
+    if (!emitFPCompare(I.getOperand(2).getReg(), I.getOperand(3).getReg(), MIB,
+                       Pred) ||
+        !emitCSetForFCmp(I.getOperand(0).getReg(), Pred, MIB))
       return false;
     I.eraseFromParent();
     return true;
@@ -3278,15 +3253,14 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
   return false;
 }

-bool AArch64InstructionSelector::selectReduction(
-    MachineInstr &I, MachineRegisterInfo &MRI) const {
+bool AArch64InstructionSelector::selectReduction(MachineInstr &I,
+                                                 MachineRegisterInfo &MRI) {
   Register VecReg = I.getOperand(1).getReg();
   LLT VecTy = MRI.getType(VecReg);
   if (I.getOpcode() == TargetOpcode::G_VECREDUCE_ADD) {
     // For <2 x i32> ADDPv2i32 generates an FPR64 value, so we need to emit
     // a subregister copy afterwards.
     if (VecTy == LLT::vector(2, 32)) {
-      MachineIRBuilder MIB(I);
       Register DstReg = I.getOperand(0).getReg();
       auto AddP = MIB.buildInstr(AArch64::ADDPv2i32, {&AArch64::FPR64RegClass},
                                  {VecReg, VecReg});
@@ -3332,12 +3306,11 @@ bool AArch64InstructionSelector::selectReduction(
 }

 bool AArch64InstructionSelector::selectBrJT(MachineInstr &I,
-                                            MachineRegisterInfo &MRI) const {
+                                            MachineRegisterInfo &MRI) {
   assert(I.getOpcode() == TargetOpcode::G_BRJT && "Expected G_BRJT");
   Register JTAddr = I.getOperand(0).getReg();
   unsigned JTI = I.getOperand(1).getIndex();
   Register Index = I.getOperand(2).getReg();
-  MachineIRBuilder MIB(I);

   Register TargetReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
   Register ScratchReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
@@ -3352,15 +3325,14 @@ bool AArch64InstructionSelector::selectBrJT(MachineInstr &I,
   return constrainSelectedInstRegOperands(*JumpTableInst, TII, TRI, RBI);
 }

-bool AArch64InstructionSelector::selectJumpTable(
-    MachineInstr &I, MachineRegisterInfo &MRI) const {
+bool AArch64InstructionSelector::selectJumpTable(MachineInstr &I,
+                                                 MachineRegisterInfo &MRI) {
   assert(I.getOpcode() == TargetOpcode::G_JUMP_TABLE && "Expected jump table");
   assert(I.getOperand(1).isJTI() && "Jump table op should have a JTI!");

   Register DstReg = I.getOperand(0).getReg();
   unsigned JTI = I.getOperand(1).getIndex();
   // We generate a MOVaddrJT which will get expanded to an ADRP + ADD later.
-  MachineIRBuilder MIB(I);
   auto MovMI =
       MIB.buildInstr(AArch64::MOVaddrJT, {DstReg}, {})
           .addJumpTableIndex(JTI, AArch64II::MO_PAGE)
@@ -3370,7 +3342,7 @@ bool AArch64InstructionSelector::selectJumpTable(
 }

 bool AArch64InstructionSelector::selectTLSGlobalValue(
-    MachineInstr &I, MachineRegisterInfo &MRI) const {
+    MachineInstr &I, MachineRegisterInfo &MRI) {
   if (!STI.isTargetMachO())
     return false;
   MachineFunction &MF = *I.getParent()->getParent();
@@ -3380,7 +3352,6 @@ bool AArch64InstructionSelector::selectTLSGlobalValue(
   assert(GlobalOp.getOffset() == 0 &&
          "Shouldn't have an offset on TLS globals!");
   const GlobalValue &GV = *GlobalOp.getGlobal();
-  MachineIRBuilder MIB(I);

   auto LoadGOT =
       MIB.buildInstr(AArch64::LOADgot, {&AArch64::GPR64commonRegClass}, {})
@@ -3517,7 +3488,7 @@ bool AArch64InstructionSelector::selectIntrinsicRound(
 }

 bool AArch64InstructionSelector::selectVectorICmp(
-    MachineInstr &I, MachineRegisterInfo &MRI) const {
+    MachineInstr &I, MachineRegisterInfo &MRI) {
   Register DstReg = I.getOperand(0).getReg();
   LLT DstTy = MRI.getType(DstReg);
   Register SrcReg = I.getOperand(2).getReg();
@@ -3672,7 +3643,6 @@ bool AArch64InstructionSelector::selectVectorICmp(
   if (SwapOperands)
     std::swap(SrcReg, Src2Reg);

-  MachineIRBuilder MIB(I);
   auto Cmp = MIB.buildInstr(Opc, {SrcRC}, {SrcReg, Src2Reg});
   constrainSelectedInstRegOperands(*Cmp, TII, TRI, RBI);

@@ -3716,7 +3686,7 @@ MachineInstr *AArch64InstructionSelector::emitScalarToVector(
 }

 bool AArch64InstructionSelector::selectMergeValues(
-    MachineInstr &I, MachineRegisterInfo &MRI) const {
+    MachineInstr &I, MachineRegisterInfo &MRI) {
   assert(I.getOpcode() == TargetOpcode::G_MERGE_VALUES && "unexpected opcode");
   const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
   const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
@@ -3730,7 +3700,6 @@ bool AArch64InstructionSelector::selectMergeValues(
   if (DstTy == LLT::scalar(128)) {
     if (SrcTy.getSizeInBits() != 64)
       return false;
-    MachineIRBuilder MIB(I);
     Register DstReg = I.getOperand(0).getReg();
     Register Src1Reg = I.getOperand(1).getReg();
     Register Src2Reg =
         I.getOperand(2).getReg();
@@ -3871,7 +3840,7 @@ MachineInstr *AArch64InstructionSelector::emitExtractVectorElt(
 }

 bool AArch64InstructionSelector::selectExtractElt(
-    MachineInstr &I, MachineRegisterInfo &MRI) const {
+    MachineInstr &I, MachineRegisterInfo &MRI) {
   assert(I.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT &&
          "unexpected opcode!");
   Register DstReg = I.getOperand(0).getReg();
@@ -3898,11 +3867,10 @@ bool AArch64InstructionSelector::selectExtractElt(
     return false;
   unsigned LaneIdx = VRegAndVal->Value.getSExtValue();

-  MachineIRBuilder MIRBuilder(I);
-
   const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
   MachineInstr *Extract = emitExtractVectorElt(DstReg, DstRB, NarrowTy, SrcReg,
-                                               LaneIdx, MIRBuilder);
+                                               LaneIdx, MIB);
   if (!Extract)
     return false;

@@ -3911,7 +3879,7 @@ bool AArch64InstructionSelector::selectExtractElt(
 }

 bool AArch64InstructionSelector::selectSplitVectorUnmerge(
-    MachineInstr &I, MachineRegisterInfo &MRI) const {
+    MachineInstr &I, MachineRegisterInfo &MRI) {
   unsigned NumElts = I.getNumOperands() - 1;
   Register SrcReg = I.getOperand(NumElts).getReg();
   const LLT NarrowTy = MRI.getType(I.getOperand(0).getReg());
@@ -3923,8 +3891,6 @@ bool AArch64InstructionSelector::selectSplitVectorUnmerge(
     return false;
   }

-  MachineIRBuilder MIB(I);
-
   // We implement a split vector operation by treating the sub-vectors as
   // scalars and extracting them.
   const RegisterBank &DstRB =
@@ -3940,8 +3906,8 @@ bool AArch64InstructionSelector::selectSplitVectorUnmerge(
   return true;
 }

-bool AArch64InstructionSelector::selectUnmergeValues(
-    MachineInstr &I, MachineRegisterInfo &MRI) const {
+bool AArch64InstructionSelector::selectUnmergeValues(MachineInstr &I,
+                                                     MachineRegisterInfo &MRI) {
   assert(I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
          "unexpected opcode");

@@ -3970,8 +3936,6 @@ bool AArch64InstructionSelector::selectUnmergeValues(
   if (!NarrowTy.isScalar())
     return selectSplitVectorUnmerge(I, MRI);

-  MachineIRBuilder MIB(I);
-
   // Choose a lane copy opcode and subregister based off of the size of the
   // vector's elements.
   unsigned CopyOpc = 0;
@@ -4056,14 +4020,13 @@ bool AArch64InstructionSelector::selectUnmergeValues(
 }

 bool AArch64InstructionSelector::selectConcatVectors(
-    MachineInstr &I, MachineRegisterInfo &MRI) const {
+    MachineInstr &I, MachineRegisterInfo &MRI) {
   assert(I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
          "Unexpected opcode");
   Register Dst = I.getOperand(0).getReg();
   Register Op1 = I.getOperand(1).getReg();
   Register Op2 = I.getOperand(2).getReg();
-  MachineIRBuilder MIRBuilder(I);
-  MachineInstr *ConcatMI = emitVectorConcat(Dst, Op1, Op2, MIRBuilder);
+  MachineInstr *ConcatMI = emitVectorConcat(Dst, Op1, Op2, MIB);
   if (!ConcatMI)
     return false;
   I.eraseFromParent();
@@ -4470,8 +4433,7 @@ AArch64InstructionSelector::emitOverflowOp(unsigned Opcode, Register Dst,
   }
 }

-bool AArch64InstructionSelector::tryOptSelect(MachineInstr &I) const {
-  MachineIRBuilder MIB(I);
+bool AArch64InstructionSelector::tryOptSelect(MachineInstr &I) {
   MachineRegisterInfo &MRI = *MIB.getMRI();
   // We want to recognize this pattern:
   //
@@ -4628,7 +4590,7 @@ MachineInstr *AArch64InstructionSelector::tryFoldIntegerCompare(
 }

 bool AArch64InstructionSelector::selectShuffleVector(
-    MachineInstr &I, MachineRegisterInfo &MRI) const {
+    MachineInstr &I, MachineRegisterInfo &MRI) {
   const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
   Register Src1Reg = I.getOperand(1).getReg();
   const LLT Src1Ty = MRI.getType(Src1Reg);
@@ -4661,11 +4623,9 @@ bool AArch64InstructionSelector::selectShuffleVector(
     }
   }

-  MachineIRBuilder MIRBuilder(I);
-
   // Use a constant pool to load the index vector for TBL.
   Constant *CPVal = ConstantVector::get(CstIdxs);
-  MachineInstr *IndexLoad = emitLoadFromConstantPool(CPVal, MIRBuilder);
+  MachineInstr *IndexLoad = emitLoadFromConstantPool(CPVal, MIB);
   if (!IndexLoad) {
     LLVM_DEBUG(dbgs() << "Could not load from a constant pool");
     return false;
@@ -4674,25 +4634,23 @@ bool AArch64InstructionSelector::selectShuffleVector(
   if (DstTy.getSizeInBits() != 128) {
     assert(DstTy.getSizeInBits() == 64 && "Unexpected shuffle result ty");
     // This case can be done with TBL1.
-    MachineInstr *Concat = emitVectorConcat(None, Src1Reg, Src2Reg, MIRBuilder);
+    MachineInstr *Concat = emitVectorConcat(None, Src1Reg, Src2Reg, MIB);
     if (!Concat) {
       LLVM_DEBUG(dbgs() << "Could not do vector concat for tbl1");
       return false;
     }

     // The constant pool load will be 64 bits, so need to convert to FPR128 reg.
-    IndexLoad =
-        emitScalarToVector(64, &AArch64::FPR128RegClass,
-                           IndexLoad->getOperand(0).getReg(), MIRBuilder);
+    IndexLoad = emitScalarToVector(64, &AArch64::FPR128RegClass,
+                                   IndexLoad->getOperand(0).getReg(), MIB);

-    auto TBL1 = MIRBuilder.buildInstr(
+    auto TBL1 = MIB.buildInstr(
         AArch64::TBLv16i8One, {&AArch64::FPR128RegClass},
         {Concat->getOperand(0).getReg(), IndexLoad->getOperand(0).getReg()});
     constrainSelectedInstRegOperands(*TBL1, TII, TRI, RBI);

     auto Copy =
-        MIRBuilder
-            .buildInstr(TargetOpcode::COPY, {I.getOperand(0).getReg()}, {})
+        MIB.buildInstr(TargetOpcode::COPY, {I.getOperand(0).getReg()}, {})
             .addReg(TBL1.getReg(0), 0, AArch64::dsub);
     RBI.constrainGenericRegister(Copy.getReg(0), AArch64::FPR64RegClass, MRI);
     I.eraseFromParent();
@@ -4701,15 +4659,14 @@ bool AArch64InstructionSelector::selectShuffleVector(

   // For TBL2 we need to emit a REG_SEQUENCE to tie together two consecutive
   // Q registers for regalloc.
-  auto RegSeq = MIRBuilder
-                    .buildInstr(TargetOpcode::REG_SEQUENCE,
-                                {&AArch64::QQRegClass}, {Src1Reg})
+  auto RegSeq = MIB.buildInstr(TargetOpcode::REG_SEQUENCE,
+                               {&AArch64::QQRegClass}, {Src1Reg})
                     .addImm(AArch64::qsub0)
                     .addUse(Src2Reg)
                     .addImm(AArch64::qsub1);

-  auto TBL2 = MIRBuilder.buildInstr(AArch64::TBLv16i8Two, {I.getOperand(0)},
-                                    {RegSeq, IndexLoad->getOperand(0)});
+  auto TBL2 = MIB.buildInstr(AArch64::TBLv16i8Two, {I.getOperand(0)},
+                             {RegSeq, IndexLoad->getOperand(0)});
   constrainSelectedInstRegOperands(*RegSeq, TII, TRI, RBI);
   constrainSelectedInstRegOperands(*TBL2, TII, TRI, RBI);
   I.eraseFromParent();
@@ -4747,8 +4704,8 @@ MachineInstr *AArch64InstructionSelector::emitLaneInsert(
   return InsElt;
 }

-bool AArch64InstructionSelector::selectInsertElt(
-    MachineInstr &I, MachineRegisterInfo &MRI) const {
+bool AArch64InstructionSelector::selectInsertElt(MachineInstr &I,
+                                                 MachineRegisterInfo &MRI) {
   assert(I.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT);

   // Get information on the destination.
@@ -4774,13 +4731,12 @@ bool AArch64InstructionSelector::selectInsertElt(
   // Perform the lane insert.
   Register SrcReg = I.getOperand(1).getReg();
   const RegisterBank &EltRB = *RBI.getRegBank(EltReg, MRI, TRI);
-  MachineIRBuilder MIRBuilder(I);

   if (VecSize < 128) {
     // If the vector we're inserting into is smaller than 128 bits, widen it
     // to 128 to do the insert.
-    MachineInstr *ScalarToVec = emitScalarToVector(
-        VecSize, &AArch64::FPR128RegClass, SrcReg, MIRBuilder);
+    MachineInstr *ScalarToVec =
+        emitScalarToVector(VecSize, &AArch64::FPR128RegClass, SrcReg, MIB);
     if (!ScalarToVec)
       return false;
     SrcReg = ScalarToVec->getOperand(0).getReg();
@@ -4790,7 +4746,7 @@ bool AArch64InstructionSelector::selectInsertElt(
   // Note that if our vector is already 128 bits, we end up emitting an extra
   // register.
   MachineInstr *InsMI =
-      emitLaneInsert(None, SrcReg, EltReg, LaneIdx, EltRB, MIRBuilder);
+      emitLaneInsert(None, SrcReg, EltReg, LaneIdx, EltRB, MIB);

   if (VecSize < 128) {
     // If we had to widen to perform the insert, then we have to demote back to
@@ -4810,7 +4766,7 @@ bool AArch64InstructionSelector::selectInsertElt(
                       << "\n");
       return false;
     }
-    MIRBuilder.buildInstr(TargetOpcode::COPY, {DstReg}, {})
+    MIB.buildInstr(TargetOpcode::COPY, {DstReg}, {})
         .addReg(DemoteVec, 0, SubReg);
     RBI.constrainGenericRegister(DstReg, *RC, MRI);
   } else {
@@ -4826,7 +4782,7 @@ bool AArch64InstructionSelector::selectInsertElt(

 MachineInstr *
 AArch64InstructionSelector::emitConstantVector(Register Dst, Constant *CV,
                                                MachineIRBuilder &MIRBuilder,
-                                               MachineRegisterInfo &MRI) const {
+                                               MachineRegisterInfo &MRI) {
   LLT DstTy = MRI.getType(Dst);
   unsigned DstSize = DstTy.getSizeInBits();
   if (CV->isNullValue()) {
@@ -4862,7 +4818,7 @@ AArch64InstructionSelector::emitConstantVector(Register Dst, Constant *CV,
 }

 bool AArch64InstructionSelector::tryOptConstantBuildVec(
-    MachineInstr &I, LLT DstTy, MachineRegisterInfo &MRI) const {
+    MachineInstr &I, LLT DstTy, MachineRegisterInfo &MRI) {
   assert(I.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
   unsigned DstSize = DstTy.getSizeInBits();
   assert(DstSize <= 128 && "Unexpected build_vec type!");
@@ -4886,15 +4842,14 @@ bool AArch64InstructionSelector::tryOptConstantBuildVec(
     return false;
   }
   Constant *CV = ConstantVector::get(Csts);
-  MachineIRBuilder MIB(I);
   if (!emitConstantVector(I.getOperand(0).getReg(), CV, MIB, MRI))
     return false;
   I.eraseFromParent();
   return true;
 }

-bool AArch64InstructionSelector::selectBuildVector(
-    MachineInstr &I, MachineRegisterInfo &MRI) const {
+bool AArch64InstructionSelector::selectBuildVector(MachineInstr &I,
+                                                   MachineRegisterInfo &MRI) {
   assert(I.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
   // Until we port more of the optimized selections, for now just use a vector
   // insert sequence.
@@ -4907,12 +4862,11 @@ bool AArch64InstructionSelector::selectBuildVector(
   if (EltSize < 16 || EltSize > 64)
     return false; // Don't support all element types yet.
   const RegisterBank &RB = *RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI);
-  MachineIRBuilder MIRBuilder(I);

   const TargetRegisterClass *DstRC = &AArch64::FPR128RegClass;
   MachineInstr *ScalarToVec =
       emitScalarToVector(DstTy.getElementType().getSizeInBits(), DstRC,
-                         I.getOperand(1).getReg(), MIRBuilder);
+                         I.getOperand(1).getReg(), MIB);
   if (!ScalarToVec)
     return false;

@@ -4926,7 +4880,7 @@ bool AArch64InstructionSelector::selectBuildVector(
     // Note that if we don't do a subregister copy, we can end up making an
     // extra register.
     PrevMI = &*emitLaneInsert(None, DstVec, I.getOperand(i).getReg(), i - 1, RB,
-                              MIRBuilder);
+                              MIB);
     DstVec = PrevMI->getOperand(0).getReg();
   }

@@ -4955,8 +4909,7 @@ bool AArch64InstructionSelector::selectBuildVector(
   Register Reg = MRI.createVirtualRegister(RC);
   Register DstReg = I.getOperand(0).getReg();

-  MIRBuilder.buildInstr(TargetOpcode::COPY, {DstReg}, {})
-      .addReg(DstVec, 0, SubReg);
+  MIB.buildInstr(TargetOpcode::COPY, {DstReg}, {}).addReg(DstVec, 0, SubReg);
   MachineOperand &RegOp = I.getOperand(1);
   RegOp.setReg(Reg);
   RBI.constrainGenericRegister(DstReg, *RC, MRI);
@@ -4984,25 +4937,24 @@ static unsigned findIntrinsicID(MachineInstr &I) {
 }

 bool AArch64InstructionSelector::selectIntrinsicWithSideEffects(
-    MachineInstr &I, MachineRegisterInfo &MRI) const {
+    MachineInstr &I, MachineRegisterInfo &MRI) {
   // Find the intrinsic ID.
   unsigned IntrinID = findIntrinsicID(I);
   if (!IntrinID)
     return false;
-  MachineIRBuilder MIRBuilder(I);

   // Select the instruction.
   switch (IntrinID) {
   default:
     return false;
   case Intrinsic::trap:
-    MIRBuilder.buildInstr(AArch64::BRK, {}, {}).addImm(1);
+    MIB.buildInstr(AArch64::BRK, {}, {}).addImm(1);
     break;
   case Intrinsic::debugtrap:
-    MIRBuilder.buildInstr(AArch64::BRK, {}, {}).addImm(0xF000);
+    MIB.buildInstr(AArch64::BRK, {}, {}).addImm(0xF000);
     break;
   case Intrinsic::ubsantrap:
-    MIRBuilder.buildInstr(AArch64::BRK, {}, {})
+    MIB.buildInstr(AArch64::BRK, {}, {})
         .addImm(I.getOperand(1).getImm() | ('U' << 8));
     break;
   }
@@ -5016,7 +4968,6 @@ bool AArch64InstructionSelector::selectIntrinsic(MachineInstr &I,
   unsigned IntrinID = findIntrinsicID(I);
   if (!IntrinID)
     return false;
-  MachineIRBuilder MIRBuilder(I);

   switch (IntrinID) {
   default:
@@ -5034,7 +4985,7 @@ bool AArch64InstructionSelector::selectIntrinsic(MachineInstr &I,
     // the source and destination if they are on GPRs.
     if (RBI.getRegBank(SrcReg, MRI, TRI)->getID() != AArch64::FPRRegBankID) {
       SrcReg = MRI.createVirtualRegister(&AArch64::FPR32RegClass);
-      MIRBuilder.buildCopy({SrcReg}, {I.getOperand(2)});
+      MIB.buildCopy({SrcReg}, {I.getOperand(2)});

       // Make sure the copy ends up getting constrained properly.
       RBI.constrainGenericRegister(I.getOperand(2).getReg(),
@@ -5045,14 +4996,14 @@ bool AArch64InstructionSelector::selectIntrinsic(MachineInstr &I,
       DstReg = MRI.createVirtualRegister(&AArch64::FPR32RegClass);

     // Actually insert the instruction.
-    auto SHA1Inst = MIRBuilder.buildInstr(AArch64::SHA1Hrr, {DstReg}, {SrcReg});
+    auto SHA1Inst = MIB.buildInstr(AArch64::SHA1Hrr, {DstReg}, {SrcReg});
     constrainSelectedInstRegOperands(*SHA1Inst, TII, TRI, RBI);

     // Did we create a new register for the destination?
    if (DstReg != I.getOperand(0).getReg()) {
       // Yep. Copy the result of the instruction back into the original
       // destination.
-      MIRBuilder.buildCopy({I.getOperand(0)}, {DstReg});
+      MIB.buildCopy({I.getOperand(0)}, {DstReg});
       RBI.constrainGenericRegister(I.getOperand(0).getReg(),
                                    AArch64::GPR32RegClass, MRI);
     }
@@ -5079,11 +5030,11 @@ bool AArch64InstructionSelector::selectIntrinsic(MachineInstr &I,
     }

     if (STI.hasPAuth()) {
-      MIRBuilder.buildInstr(AArch64::XPACI, {DstReg}, {MFReturnAddr});
+      MIB.buildInstr(AArch64::XPACI, {DstReg}, {MFReturnAddr});
     } else {
-      MIRBuilder.buildCopy({Register(AArch64::LR)}, {MFReturnAddr});
-      MIRBuilder.buildInstr(AArch64::XPACLRI);
-      MIRBuilder.buildCopy({DstReg}, {Register(AArch64::LR)});
+      MIB.buildCopy({Register(AArch64::LR)}, {MFReturnAddr});
+      MIB.buildInstr(AArch64::XPACLRI);
+      MIB.buildCopy({DstReg}, {Register(AArch64::LR)});
     }

     I.eraseFromParent();
@@ -5095,25 +5046,25 @@ bool AArch64InstructionSelector::selectIntrinsic(MachineInstr &I,
     while (Depth--) {
       Register NextFrame = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
       auto Ldr =
-          MIRBuilder.buildInstr(AArch64::LDRXui, {NextFrame}, {FrameAddr})
-              .addImm(0);
+          MIB.buildInstr(AArch64::LDRXui, {NextFrame}, {FrameAddr}).addImm(0);
       constrainSelectedInstRegOperands(*Ldr, TII, TRI, RBI);
       FrameAddr = NextFrame;
     }

     if (IntrinID == Intrinsic::frameaddress)
-      MIRBuilder.buildCopy({DstReg}, {FrameAddr});
+      MIB.buildCopy({DstReg}, {FrameAddr});
     else {
       MFI.setReturnAddressIsTaken(true);

       if (STI.hasPAuth()) {
         Register TmpReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
-        MIRBuilder.buildInstr(AArch64::LDRXui, {TmpReg}, {FrameAddr}).addImm(1);
-        MIRBuilder.buildInstr(AArch64::XPACI, {DstReg}, {TmpReg});
+        MIB.buildInstr(AArch64::LDRXui, {TmpReg}, {FrameAddr}).addImm(1);
+        MIB.buildInstr(AArch64::XPACI, {DstReg}, {TmpReg});
       } else {
-        MIRBuilder.buildInstr(AArch64::LDRXui, {Register(AArch64::LR)}, {FrameAddr}).addImm(1);
-        MIRBuilder.buildInstr(AArch64::XPACLRI);
-        MIRBuilder.buildCopy({DstReg}, {Register(AArch64::LR)});
+        MIB.buildInstr(AArch64::LDRXui, {Register(AArch64::LR)}, {FrameAddr})
+            .addImm(1);
+        MIB.buildInstr(AArch64::XPACLRI);
+        MIB.buildCopy({DstReg}, {Register(AArch64::LR)});
       }
     }
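
The pattern referenced from the commit message above, as a minimal standalone
sketch rather than the actual AArch64InstructionSelector: MySelector and
selectFoo are hypothetical names, while MachineIRBuilder::setMF(),
setInstrAndDebugLoc(), and buildCopy() are the real APIs the patch uses. The
builder is bound to the MachineFunction once per selector run and re-pointed
at each instruction in select(), so helpers no longer construct a fresh
MachineIRBuilder on every call.

    // Sketch only: mirrors the member-builder pattern this commit adopts.
    #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/CodeGen/MachineInstr.h"

    namespace {
    using namespace llvm;

    class MySelector {
      MachineIRBuilder MIB; // One builder, reused for the whole function.

    public:
      // Analogous to setupMF() in the patch: bind the builder once per run.
      void setupMF(MachineFunction &MF) { MIB.setMF(MF); }

      // Analogous to select(): point the builder at the current instruction
      // so anything built is inserted there with the right debug location.
      bool select(MachineInstr &I) {
        MIB.setInstrAndDebugLoc(I);
        return selectFoo(I);
      }

    private:
      bool selectFoo(MachineInstr &I) {
        // Before the cleanup a helper would start with
        //   MachineIRBuilder MIB(I);
        // constructing a builder per call; now it just uses the member. This
        // is also why the helpers lose their const qualifiers in the diff:
        // they now mutate the shared builder's insertion point.
        MIB.buildCopy(I.getOperand(0).getReg(), I.getOperand(1).getReg());
        I.eraseFromParent();
        return true;
      }
    };
    } // namespace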