From 0ba8886af56198083b8cfd580e6de60444a2038d Mon Sep 17 00:00:00 2001
From: Marco Elver
Date: Tue, 6 Sep 2022 15:49:13 +0200
Subject: [PATCH] [FastISel] Propagate PCSections metadata to MachineInstr

Propagate PC sections metadata to MachineInstr when FastISel is doing
instruction selection.
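A minimal sketch of the pattern applied throughout (Opc and Reg are
placeholder names, not taken from this patch), assuming the MIMetadata
helper declared in llvm/include/llvm/CodeGen/MachineInstrBuilder.h:

    // Before: FastISel cached only the debug location of the IR
    // instruction currently being selected.
    DbgLoc = I->getDebugLoc();
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), Reg);

    // After: MIMetadata captures both the DebugLoc and the !pcsections
    // metadata of *I; BuildMI() attaches both to the new MachineInstr.
    MIMD = MIMetadata(*I);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), Reg);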
Reviewed By: vitalybuka

Differential Revision: https://reviews.llvm.org/D130884
---
 llvm/include/llvm/CodeGen/FastISel.h                 |   5 +-
 llvm/lib/CodeGen/SelectionDAG/FastISel.cpp           | 108 +++++------
 llvm/lib/Target/AArch64/AArch64FastISel.cpp          | 142 +++++++-------
 llvm/lib/Target/ARM/ARMFastISel.cpp                  | 142 +++++++-------
 llvm/lib/Target/Mips/MipsFastISel.cpp                |  24 +--
 llvm/lib/Target/PowerPC/PPCFastISel.cpp              | 110 +++++------
 .../lib/Target/WebAssembly/WebAssemblyFastISel.cpp   |  50 ++---
 llvm/lib/Target/X86/X86FastISel.cpp                  | 204 ++++++++++-----------
 llvm/utils/TableGen/FastISelEmitter.cpp              |   2 +-
 9 files changed, 394 insertions(+), 393 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/FastISel.h b/llvm/include/llvm/CodeGen/FastISel.h
index 8be97d2..fd779d2 100644
--- a/llvm/include/llvm/CodeGen/FastISel.h
+++ b/llvm/include/llvm/CodeGen/FastISel.h
@@ -18,6 +18,7 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/TargetLowering.h"
 #include "llvm/IR/Attributes.h"
 #include "llvm/IR/CallingConv.h"
@@ -204,7 +205,7 @@ protected:
   MachineRegisterInfo &MRI;
   MachineFrameInfo &MFI;
   MachineConstantPool &MCP;
-  DebugLoc DbgLoc;
+  MIMetadata MIMD;
   const TargetMachine &TM;
   const DataLayout &DL;
   const TargetInstrInfo &TII;
@@ -247,7 +248,7 @@ public:
   void finishBasicBlock();
 
   /// Return current debug location information.
-  DebugLoc getCurDebugLoc() const { return DbgLoc; }
+  DebugLoc getCurDebugLoc() const { return MIMD.getDL(); }
 
   /// Do "fast" instruction selection for function arguments and append
   /// the machine instructions to the current block. Returns true when
diff --git a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index dc8688b..4f50837 100644
--- a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -319,7 +319,7 @@ Register FastISel::materializeConstant(const Value *V, MVT VT) {
     Reg = lookUpRegForValue(Op);
   } else if (isa<UndefValue>(V)) {
     Reg = createResultReg(TLI.getRegClassFor(VT));
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
   }
   return Reg;
@@ -696,20 +696,20 @@ bool FastISel::selectStackmap(const CallInst *I) {
   // Issue CALLSEQ_START
   unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
   auto Builder =
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackDown));
   const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
   for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
     Builder.addImm(0);
 
   // Issue STACKMAP.
-  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                     TII.get(TargetOpcode::STACKMAP));
   for (auto const &MO : Ops)
     MIB.add(MO);
 
   // Issue CALLSEQ_END
   unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackUp))
       .addImm(0)
       .addImm(0);
 
@@ -878,7 +878,7 @@ bool FastISel::selectPatchpoint(const CallInst *I) {
                                               /*isImp=*/true));
 
   // Insert the patchpoint instruction before the call generated by the target.
-  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
+  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, MIMD,
                                     TII.get(TargetOpcode::PATCHPOINT));
 
   for (auto &MO : Ops)
@@ -907,7 +907,7 @@ bool FastISel::selectXRayCustomEvent(const CallInst *I) {
   Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                           /*isDef=*/false));
   MachineInstrBuilder MIB =
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
   for (auto &MO : Ops)
     MIB.add(MO);
@@ -928,7 +928,7 @@ bool FastISel::selectXRayTypedEvent(const CallInst *I) {
   Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(2)),
                                           /*isDef=*/false));
   MachineInstrBuilder MIB =
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
   for (auto &MO : Ops)
     MIB.add(MO);
@@ -1170,7 +1170,7 @@ bool FastISel::selectCall(const User *I) {
     ExtraInfo |= InlineAsm::Extra_IsConvergent;
   ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
 
-  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                     TII.get(TargetOpcode::INLINEASM));
   MIB.addExternalSymbol(IA->getAsmString().c_str());
   MIB.addImm(ExtraInfo);
@@ -1250,12 +1250,12 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
                            false);
 
     if (Op) {
-      assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
+      assert(DI->getVariable()->isValidLocationForIntrinsic(MIMD.getDL()) &&
              "Expected inlined-at fields to agree");
       // A dbg.declare describes the address of a source variable, so lower it
       // into an indirect DBG_VALUE.
       auto Builder =
-          BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+          BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD.getDL(),
                   TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true, *Op,
                   DI->getVariable(), DI->getExpression());
@@ -1282,12 +1282,12 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
     const DbgValueInst *DI = cast<DbgValueInst>(II);
     const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
     const Value *V = DI->getValue();
-    assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
+    assert(DI->getVariable()->isValidLocationForIntrinsic(MIMD.getDL()) &&
           "Expected inlined-at fields to agree");
     if (!V || isa<UndefValue>(V) || DI->hasArgList()) {
       // DI is either undef or cannot produce a valid DBG_VALUE, so produce an
       // undef DBG_VALUE to terminate any prior location.
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, false, 0U,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD.getDL(), II, false, 0U,
              DI->getVariable(), DI->getExpression());
     } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
       // See if there's an expression to constant-fold.
@@ -1295,19 +1295,19 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
       if (Expr)
         std::tie(Expr, CI) = Expr->constantFold(CI);
       if (CI->getBitWidth() > 64)
-        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
             .addCImm(CI)
             .addImm(0U)
             .addMetadata(DI->getVariable())
             .addMetadata(Expr);
       else
-        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
             .addImm(CI->getZExtValue())
             .addImm(0U)
             .addMetadata(DI->getVariable())
             .addMetadata(Expr);
     } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
           .addFPImm(CF)
           .addImm(0U)
           .addMetadata(DI->getVariable())
@@ -1316,8 +1316,8 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
       // FIXME: This does not handle register-indirect values at offset 0.
       bool IsIndirect = false;
       auto Builder =
-          BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
-                  DI->getVariable(), DI->getExpression());
+          BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD.getDL(), II,
+                  IsIndirect, Reg, DI->getVariable(), DI->getExpression());
 
       // If using instruction referencing, mutate this into a DBG_INSTR_REF,
       // to be later patched up by finalizeDebugInstrRefs.
@@ -1339,7 +1339,7 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
       return true;
     }
 
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::DBG_LABEL)).addMetadata(DI->getLabel());
     return true;
   }
@@ -1447,7 +1447,7 @@ bool FastISel::selectFreeze(const User *I) {
   MVT Ty = ETy.getSimpleVT();
   const TargetRegisterClass *TyRegClass = TLI.getRegClassFor(Ty);
   Register ResultReg = createResultReg(TyRegClass);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
           TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);
 
   updateValueMap(I, ResultReg);
@@ -1499,7 +1499,7 @@ bool FastISel::selectInstruction(const Instruction *I) {
       if (Call->getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
         return false;
 
-  DbgLoc = I->getDebugLoc();
+  MIMD = MIMetadata(*I);
 
   SavedInsertPt = FuncInfo.InsertPt;
 
@@ -1524,7 +1524,7 @@ bool FastISel::selectInstruction(const Instruction *I) {
   if (!SkipTargetIndependentISel) {
     if (selectOperator(I, I->getOpcode())) {
       ++NumFastIselSuccessIndependent;
-      DbgLoc = DebugLoc();
+      MIMD = {};
      return true;
    }
    // Remove dead code.
@@ -1536,7 +1536,7 @@ bool FastISel::selectInstruction(const Instruction *I) {
   // Next, try calling the target to attempt to handle the instruction.
   if (fastSelectInstruction(I)) {
     ++NumFastIselSuccessTarget;
-    DbgLoc = DebugLoc();
+    MIMD = {};
     return true;
   }
   // Remove dead code.
@@ -1544,7 +1544,7 @@ bool FastISel::selectInstruction(const Instruction *I) {
   if (SavedInsertPt != FuncInfo.InsertPt)
     removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
 
-  DbgLoc = DebugLoc();
+  MIMD = {};
   // Undo phi node updates, because they will be added again by SelectionDAG.
   if (I->isTerminator()) {
     // PHI node handling may have generated local value instructions.
@@ -1592,7 +1592,7 @@ void FastISel::finishCondBranch(const BasicBlock *BranchBB,
       FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
   }
 
-  fastEmitBranch(FalseMBB, DbgLoc);
+  fastEmitBranch(FalseMBB, MIMD.getDL());
 }
 
 /// Emit an FNeg operation.
@@ -1905,7 +1905,7 @@ Register FastISel::constrainOperandRegClass(const MCInstrDesc &II, Register Op,
     // If it's not legal to COPY between the register classes, something
     // has gone very wrong before we got here.
     Register NewOp = createResultReg(RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
     return NewOp;
   }
@@ -1918,7 +1918,7 @@ Register FastISel::fastEmitInst_(unsigned MachineInstOpcode,
   Register ResultReg = createResultReg(RC);
   const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg);
   return ResultReg;
 }
@@ -1930,12 +1930,12 @@ Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
   Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
 
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
         .addReg(Op0);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
         .addReg(Op0);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
   }
@@ -1952,14 +1952,14 @@ Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
   Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
 
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
         .addReg(Op0)
         .addReg(Op1);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
         .addReg(Op0)
         .addReg(Op1);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
   }
   return ResultReg;
@@ -1976,16 +1976,16 @@ Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
   Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
 
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
         .addReg(Op0)
         .addReg(Op1)
         .addReg(Op2);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
         .addReg(Op0)
        .addReg(Op1)
        .addReg(Op2);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
   }
   return ResultReg;
@@ -2000,14 +2000,14 @@ Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
   Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
 
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addReg(Op0)
        .addImm(Imm);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addReg(Op0)
        .addImm(Imm);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
   }
   return ResultReg;
@@ -2022,16 +2022,16 @@ Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
   Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
 
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addReg(Op0)
        .addImm(Imm1)
        .addImm(Imm2);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addReg(Op0)
        .addImm(Imm1)
        .addImm(Imm2);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
   }
   return ResultReg;
@@ -2045,12 +2045,12 @@ Register FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
   Register ResultReg = createResultReg(RC);
 
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addFPImm(FPImm);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addFPImm(FPImm);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
   }
   return ResultReg;
@@ -2066,16 +2066,16 @@ Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
   Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
 
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addReg(Op0)
        .addReg(Op1)
        .addImm(Imm);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addReg(Op0)
        .addReg(Op1)
        .addImm(Imm);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
   }
   return ResultReg;
@@ -2087,11 +2087,11 @@ Register FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
   const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addImm(Imm);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II).addImm(Imm);
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
   }
   return ResultReg;
@@ -2104,7 +2104,7 @@ Register FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
          "Cannot yet extract from physregs");
   const TargetRegisterClass *RC = MRI.getRegClass(Op0);
   MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
           ResultReg).addReg(Op0, 0, Idx);
   return ResultReg;
 }
@@ -2169,9 +2169,9 @@ bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
       // Set the DebugLoc for the copy. Use the location of the operand if
       // there is one; otherwise no location, flushLocalValueMap will fix it.
-      DbgLoc = DebugLoc();
+      MIMD = {};
       if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
-        DbgLoc = Inst->getDebugLoc();
+        MIMD = MIMetadata(*Inst);
 
       Register Reg = getRegForValue(PHIOp);
       if (!Reg) {
@@ -2179,7 +2179,7 @@ bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
         return false;
       }
       FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
-      DbgLoc = DebugLoc();
+      MIMD = {};
     }
   }
 
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index 04119aa..da7f431 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -356,7 +356,7 @@ unsigned AArch64FastISel::fastMaterializeAlloca(const AllocaInst *AI) {
 
   if (SI != FuncInfo.StaticAllocaMap.end()) {
     Register ResultReg = createResultReg(&AArch64::GPR64spRegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADDXri),
             ResultReg)
         .addFrameIndex(SI->second)
        .addImm(0)
@@ -379,7 +379,7 @@ unsigned AArch64FastISel::materializeInt(const ConstantInt *CI, MVT VT) {
                                    : &AArch64::GPR32RegClass;
   unsigned ZeroReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
   Register ResultReg = createResultReg(RC);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
           ResultReg).addReg(ZeroReg, getKillRegState(true));
   return ResultReg;
 }
@@ -411,11 +411,11 @@ unsigned AArch64FastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
             &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
     Register TmpReg = createResultReg(RC);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc1), TmpReg)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc1), TmpReg)
         .addImm(CFP->getValueAPF().bitcastToAPInt().getZExtValue());
 
     Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg)
         .addReg(TmpReg, getKillRegState(true));
 
@@ -428,12 +428,12 @@ unsigned AArch64FastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
   unsigned CPI = MCP.getConstantPoolIndex(cast<Constant>(CFP), Alignment);
   Register ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADRP),
           ADRPReg).addConstantPoolIndex(CPI, 0, AArch64II::MO_PAGE);
 
   unsigned Opc = Is64Bit ? AArch64::LDRDui : AArch64::LDRSui;
   Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
       .addReg(ADRPReg)
       .addConstantPoolIndex(CPI, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
   return ResultReg;
 }
@@ -460,7 +460,7 @@ unsigned AArch64FastISel::materializeGV(const GlobalValue *GV) {
 
   if (OpFlags & AArch64II::MO_GOT) {
     // ADRP + LDRX
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADRP),
             ADRPReg)
         .addGlobalAddress(GV, 0, AArch64II::MO_PAGE | OpFlags);
 
@@ -472,7 +472,7 @@ unsigned AArch64FastISel::materializeGV(const GlobalValue *GV) {
       ResultReg = createResultReg(&AArch64::GPR64RegClass);
       LdrOpc = AArch64::LDRXui;
     }
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(LdrOpc),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(LdrOpc),
             ResultReg)
         .addReg(ADRPReg)
         .addGlobalAddress(GV, 0, AArch64II::MO_GOT | AArch64II::MO_PAGEOFF |
@@ -483,7 +483,7 @@ unsigned AArch64FastISel::materializeGV(const GlobalValue *GV) {
     // LDRWui produces a 32-bit register, but pointers in-register are 64-bits
     // so we must extend the result on ILP32.
     Register Result64 = createResultReg(&AArch64::GPR64RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::SUBREG_TO_REG))
         .addDef(Result64)
         .addImm(0)
@@ -492,12 +492,12 @@ unsigned AArch64FastISel::materializeGV(const GlobalValue *GV) {
     return Result64;
   } else {
     // ADRP + ADDX
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADRP),
             ADRPReg)
         .addGlobalAddress(GV, 0, AArch64II::MO_PAGE | OpFlags);
 
     ResultReg = createResultReg(&AArch64::GPR64spRegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADDXri),
             ResultReg)
         .addReg(ADRPReg)
         .addGlobalAddress(GV, 0,
@@ -1035,7 +1035,7 @@ bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) {
   if ((ImmediateOffsetNeedsLowering || Addr.getOffsetReg()) && Addr.isFIBase())
   {
     Register ResultReg = createResultReg(&AArch64::GPR64spRegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADDXri),
             ResultReg)
         .addFrameIndex(Addr.getFI())
         .addImm(0)
@@ -1308,7 +1308,7 @@ unsigned AArch64FastISel::emitAddSub_rr(bool UseAdd, MVT RetVT, unsigned LHSReg,
   const MCInstrDesc &II = TII.get(Opc);
   LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
   RHSReg = constrainOperandRegClass(II, RHSReg, II.getNumDefs() + 1);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
       .addReg(LHSReg)
       .addReg(RHSReg);
   return ResultReg;
@@ -1352,7 +1352,7 @@ unsigned AArch64FastISel::emitAddSub_ri(bool UseAdd, MVT RetVT, unsigned LHSReg,
 
   const MCInstrDesc &II = TII.get(Opc);
   LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
       .addReg(LHSReg)
       .addImm(Imm)
       .addImm(getShifterImm(AArch64_AM::LSL, ShiftImm));
@@ -1394,7 +1394,7 @@ unsigned AArch64FastISel::emitAddSub_rs(bool UseAdd, MVT RetVT, unsigned LHSReg,
   const MCInstrDesc &II = TII.get(Opc);
   LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
   RHSReg = constrainOperandRegClass(II, RHSReg, II.getNumDefs() + 1);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
       .addReg(LHSReg)
      .addReg(RHSReg)
      .addImm(getShifterImm(ShiftType, ShiftImm));
@@ -1438,7 +1438,7 @@ unsigned AArch64FastISel::emitAddSub_rx(bool UseAdd, MVT RetVT, unsigned LHSReg,
   const MCInstrDesc &II = TII.get(Opc);
   LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
   RHSReg = constrainOperandRegClass(II, RHSReg, II.getNumDefs() + 1);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
       .addReg(LHSReg)
      .addReg(RHSReg)
      .addImm(getArithExtendImm(ExtType, ShiftImm));
@@ -1495,7 +1495,7 @@ bool AArch64FastISel::emitFCmp(MVT RetVT, const Value *LHS, const Value *RHS) {
 
   if (UseImm) {
     unsigned Opc = (RetVT == MVT::f64) ? AArch64::FCMPDri : AArch64::FCMPSri;
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc))
         .addReg(LHSReg);
     return true;
   }
@@ -1505,7 +1505,7 @@ bool AArch64FastISel::emitFCmp(MVT RetVT, const Value *LHS, const Value *RHS) {
     return false;
 
   unsigned Opc = (RetVT == MVT::f64) ? AArch64::FCMPDrr : AArch64::FCMPSrr;
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc))
       .addReg(LHSReg)
       .addReg(RHSReg);
   return true;
@@ -1842,7 +1842,7 @@ unsigned AArch64FastISel::emitLoad(MVT VT, MVT RetVT, Address Addr,
 
   // Create the base instruction, then add the operands.
   Register ResultReg = createResultReg(RC);
-  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                     TII.get(Opc), ResultReg);
   addLoadStoreOperands(Addr, MIB, MachineMemOperand::MOLoad, ScaleFactor, MMO);
 
@@ -1857,7 +1857,7 @@ unsigned AArch64FastISel::emitLoad(MVT VT, MVT RetVT, Address Addr,
   // the 32bit reg to a 64bit reg.
   if (WantZExt && RetVT == MVT::i64 && VT <= MVT::i32) {
     Register Reg64 = createResultReg(&AArch64::GPR64RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(AArch64::SUBREG_TO_REG), Reg64)
         .addImm(0)
         .addReg(ResultReg, getKillRegState(true))
@@ -2048,7 +2048,7 @@ bool AArch64FastISel::emitStoreRelease(MVT VT, unsigned SrcReg,
   const MCInstrDesc &II = TII.get(Opc);
   SrcReg = constrainOperandRegClass(II, SrcReg, 0);
   AddrReg = constrainOperandRegClass(II, AddrReg, 1);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
       .addReg(SrcReg)
       .addReg(AddrReg)
       .addMemOperand(MMO);
@@ -2117,7 +2117,7 @@ bool AArch64FastISel::emitStore(MVT VT, unsigned SrcReg, Address Addr,
   const MCInstrDesc &II = TII.get(Opc);
   SrcReg = constrainOperandRegClass(II, SrcReg, II.getNumDefs());
   MachineInstrBuilder MIB =
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addReg(SrcReg);
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II).addReg(SrcReg);
   addLoadStoreOperands(Addr, MIB, MachineMemOperand::MOStore, ScaleFactor, MMO);
 
   return true;
@@ -2352,7 +2352,7 @@ bool AArch64FastISel::emitCompareAndBranch(const BranchInst *BI) {
 
   // Emit the combined compare and branch instruction.
   SrcReg = constrainOperandRegClass(II, SrcReg, II.getNumDefs());
   MachineInstrBuilder MIB =
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc))
           .addReg(SrcReg);
   if (IsBitTest)
     MIB.addImm(TestBit);
@@ -2381,10 +2381,10 @@ bool AArch64FastISel::selectBranch(const Instruction *I) {
     default:
       break;
     case CmpInst::FCMP_FALSE:
-      fastEmitBranch(FBB, DbgLoc);
+      fastEmitBranch(FBB, MIMD.getDL());
       return true;
     case CmpInst::FCMP_TRUE:
-      fastEmitBranch(TBB, DbgLoc);
+      fastEmitBranch(TBB, MIMD.getDL());
       return true;
     }
 
@@ -2422,13 +2422,13 @@ bool AArch64FastISel::selectBranch(const Instruction *I) {
 
     // Emit the extra branch for FCMP_UEQ and FCMP_ONE.
     if (ExtraCC != AArch64CC::AL) {
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::Bcc))
           .addImm(ExtraCC)
           .addMBB(TBB);
     }
 
     // Emit the branch.
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::Bcc))
         .addImm(CC)
         .addMBB(TBB);
 
@@ -2438,7 +2438,7 @@ bool AArch64FastISel::selectBranch(const Instruction *I) {
   } else if (const auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
     uint64_t Imm = CI->getZExtValue();
     MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::B))
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::B))
         .addMBB(Target);
 
     // Obtain the branch probability and add the target to the successor list.
@@ -2459,7 +2459,7 @@ bool AArch64FastISel::selectBranch(const Instruction *I) {
       return false;
 
     // Emit the branch.
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::Bcc))
         .addImm(CC)
         .addMBB(TBB);
 
@@ -2482,7 +2482,7 @@ bool AArch64FastISel::selectBranch(const Instruction *I) {
   const MCInstrDesc &II = TII.get(Opcode);
   Register ConstrainedCondReg
     = constrainOperandRegClass(II, CondReg, II.getNumDefs());
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
       .addReg(ConstrainedCondReg)
       .addImm(0)
       .addMBB(TBB);
@@ -2500,7 +2500,7 @@ bool AArch64FastISel::selectIndirectBr(const Instruction *I) {
   // Emit the indirect branch.
   const MCInstrDesc &II = TII.get(AArch64::BR);
   AddrReg = constrainOperandRegClass(II, AddrReg, II.getNumDefs());
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addReg(AddrReg);
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II).addReg(AddrReg);
 
   // Make sure the CFG is up-to-date.
   for (const auto *Succ : BI->successors())
@@ -2524,7 +2524,7 @@ bool AArch64FastISel::selectCmp(const Instruction *I) {
     break;
   case CmpInst::FCMP_FALSE:
     ResultReg = createResultReg(&AArch64::GPR32RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg)
         .addReg(AArch64::WZR, getKillRegState(true));
     break;
@@ -2564,12 +2564,12 @@ bool AArch64FastISel::selectCmp(const Instruction *I) {
 
   if (CondCodes) {
     Register TmpReg1 = createResultReg(&AArch64::GPR32RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::CSINCWr),
             TmpReg1)
         .addReg(AArch64::WZR, getKillRegState(true))
         .addReg(AArch64::WZR, getKillRegState(true))
         .addImm(CondCodes[0]);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::CSINCWr),
             ResultReg)
         .addReg(TmpReg1, getKillRegState(true))
         .addReg(AArch64::WZR, getKillRegState(true))
@@ -2583,7 +2583,7 @@ bool AArch64FastISel::selectCmp(const Instruction *I) {
   AArch64CC::CondCode CC = getCompareCC(Predicate);
   assert((CC != AArch64CC::AL) && "Unexpected condition code.");
   AArch64CC::CondCode invertedCC = getInvertedCondCode(CC);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr),
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::CSINCWr),
           ResultReg)
       .addReg(AArch64::WZR, getKillRegState(true))
       .addReg(AArch64::WZR, getKillRegState(true))
@@ -2747,7 +2747,7 @@ bool AArch64FastISel::selectSelect(const Instruction *I) {
     CondReg = constrainOperandRegClass(II, CondReg, 1);
 
     // Emit a TST instruction (ANDS wzr, reg, #imm).
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II,
             AArch64::WZR)
         .addReg(CondReg)
         .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
@@ -2777,7 +2777,7 @@ bool AArch64FastISel::selectFPExt(const Instruction *I) {
     return false;
 
   Register ResultReg = createResultReg(&AArch64::FPR64RegClass);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTDSr),
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::FCVTDSr),
           ResultReg).addReg(Op);
   updateValueMap(I, ResultReg);
   return true;
@@ -2793,7 +2793,7 @@ bool AArch64FastISel::selectFPTrunc(const Instruction *I) {
     return false;
 
   Register ResultReg = createResultReg(&AArch64::FPR32RegClass);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTSDr),
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::FCVTSDr),
           ResultReg).addReg(Op);
   updateValueMap(I, ResultReg);
   return true;
@@ -2827,7 +2827,7 @@ bool AArch64FastISel::selectFPToInt(const Instruction *I, bool Signed) {
   }
   Register ResultReg = createResultReg(
       DestVT == MVT::i32 ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
       .addReg(SrcReg);
   updateValueMap(I, ResultReg);
   return true;
@@ -2980,7 +2980,7 @@ bool AArch64FastISel::fastLowerArguments() {
     // Without this, EmitLiveInCopies may eliminate the livein if its only
     // use is a bitcast (which isn't turned into an instruction).
     Register ResultReg = createResultReg(RC);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg)
         .addReg(DstReg, getKillRegState(true));
     updateValueMap(&Arg, ResultReg);
@@ -3001,7 +3001,7 @@ bool AArch64FastISel::processCallArgs(CallLoweringInfo &CLI,
 
   // Issue CALLSEQ_START
   unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackDown))
     .addImm(NumBytes).addImm(0);
 
   // Process the args.
@@ -3041,7 +3041,7 @@ bool AArch64FastISel::processCallArgs(CallLoweringInfo &CLI,
 
     // Now copy/store arg to correct locations.
     if (VA.isRegLoc() && !VA.needsCustom()) {
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
       CLI.OutRegs.push_back(VA.getLocReg());
     } else if (VA.needsCustom()) {
@@ -3084,7 +3084,7 @@ bool AArch64FastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
 
   // Issue CALLSEQ_END
   unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackUp))
     .addImm(NumBytes).addImm(0);
 
   // Now the return value.
@@ -3105,7 +3105,7 @@ bool AArch64FastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
       return false;
 
     Register ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg)
         .addReg(RVLocs[0].getLocReg());
     CLI.InRegs.push_back(RVLocs[0].getLocReg());
@@ -3215,7 +3215,7 @@ bool AArch64FastISel::fastLowerCall(CallLoweringInfo &CLI) {
   if (Subtarget->useSmallAddressing()) {
     const MCInstrDesc &II =
         TII.get(Addr.getReg() ? getBLRCallOpcode(*MF) : (unsigned)AArch64::BL);
-    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II);
+    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II);
     if (Symbol)
       MIB.addSym(Symbol, 0);
     else if (Addr.getGlobalValue())
@@ -3229,12 +3229,12 @@ bool AArch64FastISel::fastLowerCall(CallLoweringInfo &CLI) {
     unsigned CallReg = 0;
     if (Symbol) {
       Register ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADRP),
               ADRPReg)
           .addSym(Symbol, AArch64II::MO_GOT | AArch64II::MO_PAGE);
 
       CallReg = createResultReg(&AArch64::GPR64RegClass);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(AArch64::LDRXui), CallReg)
           .addReg(ADRPReg)
           .addSym(Symbol,
@@ -3249,7 +3249,7 @@ bool AArch64FastISel::fastLowerCall(CallLoweringInfo &CLI) {
 
     const MCInstrDesc &II = TII.get(getBLRCallOpcode(*MF));
     CallReg = constrainOperandRegClass(II, CallReg, 0);
-    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addReg(CallReg);
+    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II).addReg(CallReg);
   }
 
   // Add implicit physical register uses to the call.
@@ -3426,7 +3426,7 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     const AArch64RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
     Register FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
     Register SrcReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), SrcReg).addReg(FramePtr);
     // Recursively load frame address
     // ldr x0, [fp]
@@ -3451,7 +3451,7 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     // SP = FP + Fixed Object + 16
     int FI = MFI.CreateFixedObject(4, 0, false);
     Register ResultReg = createResultReg(&AArch64::GPR64spRegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(AArch64::ADDXri), ResultReg)
         .addFrameIndex(FI)
         .addImm(0)
@@ -3584,17 +3584,17 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     if (!SrcReg)
       return false;
     Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
         .addReg(SrcReg);
     updateValueMap(II, ResultReg);
     return true;
   }
   case Intrinsic::trap:
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::BRK))
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::BRK))
         .addImm(1);
     return true;
   case Intrinsic::debugtrap:
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::BRK))
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::BRK))
         .addImm(0xF000);
     return true;
 
@@ -3724,7 +3724,7 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
       if (VT == MVT::i32) {
         MulReg = emitUMULL_rr(MVT::i64, LHSReg, RHSReg);
         // tst xreg, #0xffffffff00000000
-        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                 TII.get(AArch64::ANDSXri), AArch64::XZR)
             .addReg(MulReg)
             .addImm(AArch64_AM::encodeLogicalImmediate(0xFFFFFFFF00000000, 64));
@@ -3743,7 +3743,7 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
 
     if (MulReg) {
       ResultReg1 = createResultReg(TLI.getRegClassFor(VT));
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(TargetOpcode::COPY), ResultReg1).addReg(MulReg);
     }
 
@@ -3855,14 +3855,14 @@ bool AArch64FastISel::selectRet(const Instruction *I) {
       SrcReg = emitAnd_ri(MVT::i64, SrcReg, 0xffffffff);
 
     // Make the copy.
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg);
 
     // Add register to return instruction.
     RetRegs.push_back(VA.getLocReg());
   }
 
-  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                     TII.get(AArch64::RET_ReallyLR));
   for (unsigned RetReg : RetRegs)
     MIB.addReg(RetReg, RegState::Implicit);
@@ -3925,7 +3925,7 @@ bool AArch64FastISel::selectTrunc(const Instruction *I) {
     assert(ResultReg && "Unexpected AND instruction emission failure.");
   } else {
     ResultReg = createResultReg(&AArch64::GPR32RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg)
         .addReg(SrcReg);
   }
@@ -3949,7 +3949,7 @@ unsigned AArch64FastISel::emiti1Ext(unsigned SrcReg, MVT DestVT, bool IsZExt) {
       // We're ZExt i1 to i64.  The ANDWri Wd, Ws, #1 implicitly clears the
      // upper 32 bits.  Emit a SUBREG_TO_REG to extend from Wd to Xd.
       Register Reg64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(AArch64::SUBREG_TO_REG), Reg64)
           .addImm(0)
           .addReg(ResultReg)
@@ -4046,7 +4046,7 @@ unsigned AArch64FastISel::emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
   if (Shift == 0) {
     if (RetVT == SrcVT) {
       Register ResultReg = createResultReg(RC);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(TargetOpcode::COPY), ResultReg)
           .addReg(Op0);
       return ResultReg;
@@ -4093,7 +4093,7 @@ unsigned AArch64FastISel::emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
   unsigned Opc = OpcTable[IsZExt][Is64Bit];
   if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
     Register TmpReg = MRI.createVirtualRegister(RC);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(AArch64::SUBREG_TO_REG), TmpReg)
        .addImm(0)
        .addReg(Op0)
@@ -4149,7 +4149,7 @@ unsigned AArch64FastISel::emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
   if (Shift == 0) {
     if (RetVT == SrcVT) {
       Register ResultReg = createResultReg(RC);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(TargetOpcode::COPY), ResultReg)
           .addReg(Op0);
       return ResultReg;
@@ -4209,7 +4209,7 @@ unsigned AArch64FastISel::emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
   unsigned Opc = OpcTable[IsZExt][Is64Bit];
   if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
     Register TmpReg = MRI.createVirtualRegister(RC);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(AArch64::SUBREG_TO_REG), TmpReg)
        .addImm(0)
        .addReg(Op0)
@@ -4265,7 +4265,7 @@ unsigned AArch64FastISel::emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
   if (Shift == 0) {
     if (RetVT == SrcVT) {
       Register ResultReg = createResultReg(RC);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(TargetOpcode::COPY), ResultReg)
           .addReg(Op0);
       return ResultReg;
@@ -4314,7 +4314,7 @@ unsigned AArch64FastISel::emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
   unsigned Opc = OpcTable[IsZExt][Is64Bit];
   if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
     Register TmpReg = MRI.createVirtualRegister(RC);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(AArch64::SUBREG_TO_REG), TmpReg)
        .addImm(0)
        .addReg(Op0)
@@ -4372,7 +4372,7 @@ unsigned AArch64FastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
       DestVT = MVT::i32;
     else if (DestVT == MVT::i64) {
       Register Src64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(AArch64::SUBREG_TO_REG), Src64)
           .addImm(0)
           .addReg(SrcReg)
@@ -4469,7 +4469,7 @@ bool AArch64FastISel::optimizeIntExtLoad(const Instruction *I, MVT RetVT,
 
     if (IsZExt) {
       Register Reg64 = createResultReg(&AArch64::GPR64RegClass);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(AArch64::SUBREG_TO_REG), Reg64)
           .addImm(0)
           .addReg(Reg, getKillRegState(true))
@@ -4512,7 +4512,7 @@ bool AArch64FastISel::selectIntExt(const Instruction *I) {
     if ((IsZExt && Arg->hasZExtAttr()) || (!IsZExt && Arg->hasSExtAttr())) {
       if (RetVT == MVT::i64 && SrcVT != MVT::i64) {
         Register ResultReg = createResultReg(&AArch64::GPR64RegClass);
-        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                 TII.get(AArch64::SUBREG_TO_REG), ResultReg)
             .addImm(0)
             .addReg(SrcReg)
@@ -5007,20 +5007,20 @@ bool AArch64FastISel::selectAtomicCmpXchg(const AtomicCmpXchgInst *I) {
   const Register ScratchReg = createResultReg(&AArch64::GPR32RegClass);
 
   // FIXME: MachineMemOperand doesn't support cmpxchg yet.
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
       .addDef(ResultReg1)
       .addDef(ScratchReg)
       .addUse(AddrReg)
       .addUse(DesiredReg)
       .addUse(NewReg);
 
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc))
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CmpOpc))
      .addDef(VT == MVT::i32 ? AArch64::WZR : AArch64::XZR)
      .addUse(ResultReg1)
      .addUse(DesiredReg)
      .addImm(0);
 
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr))
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::CSINCWr))
      .addDef(ResultReg2)
      .addUse(AArch64::WZR)
      .addUse(AArch64::WZR)
diff --git a/llvm/lib/Target/ARM/ARMFastISel.cpp b/llvm/lib/Target/ARM/ARMFastISel.cpp
index 0ca1a61..5f04ded 100644
--- a/llvm/lib/Target/ARM/ARMFastISel.cpp
+++ b/llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -303,12 +303,12 @@ unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
   // for this instruction.
   Op0 = constrainOperandRegClass(II, Op0, 1);
   if (II.getNumDefs() >= 1) {
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II,
                             ResultReg).addReg(Op0));
   } else {
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
                    .addReg(Op0));
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
   }
@@ -328,14 +328,14 @@ unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
 
   if (II.getNumDefs() >= 1) {
     AddOptionalDefs(
-        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
             .addReg(Op0)
             .addReg(Op1));
   } else {
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
                    .addReg(Op0)
                    .addReg(Op1));
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
   }
@@ -353,14 +353,14 @@ unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
   Op0 = constrainOperandRegClass(II, Op0, 1);
   if (II.getNumDefs() >= 1) {
     AddOptionalDefs(
-        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
            .addReg(Op0)
            .addImm(Imm));
   } else {
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
                    .addReg(Op0)
                    .addImm(Imm));
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
   }
@@ -374,12 +374,12 @@ unsigned ARMFastISel::fastEmitInst_i(unsigned MachineInstOpcode,
   const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1) {
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II,
                             ResultReg).addImm(Imm));
   } else {
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
                    .addImm(Imm));
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
   }
@@ -392,7 +392,7 @@ unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
   if (VT == MVT::f64) return 0;
 
   Register MoveReg = createResultReg(TLI.getRegClassFor(VT));
-  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
   return MoveReg;
@@ -402,7 +402,7 @@ unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
   if (VT == MVT::i64) return 0;
 
   Register MoveReg = createResultReg(TLI.getRegClassFor(VT));
-  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
   return MoveReg;
@@ -428,13 +428,13 @@ unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
       Opc = ARM::FCONSTS;
     }
     Register DestReg = createResultReg(TLI.getRegClassFor(VT));
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(Opc), DestReg).addImm(Imm));
     return DestReg;
   }
@@ -444,7 +444,7 @@ unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
 
   // The extra reg is for addrmode5.
   AddOptionalDefs(
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx)
          .addReg(0));
   return DestReg;
@@ -462,7 +462,7 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
     const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
                                             : &ARM::GPRRegClass;
     Register ImmReg = createResultReg(RC);
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(Opc), ImmReg)
                    .addImm(CI->getZExtValue()));
     return ImmReg;
@@ -478,7 +478,7 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
       const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
                                               : &ARM::GPRRegClass;
       Register ImmReg = createResultReg(RC);
-      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                              TII.get(Opc), ImmReg)
                      .addImm(Imm));
       return ImmReg;
@@ -501,13 +501,13 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
   unsigned Idx = MCP.getConstantPoolIndex(C, Alignment);
   ResultReg = createResultReg(TLI.getRegClassFor(VT));
   if (isThumb2)
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(ARM::t2LDRpci), ResultReg)
                    .addConstantPoolIndex(Idx));
   else {
     // The extra immediate is for addrmode2.
     ResultReg = constrainOperandRegClass(TII.get(ARM::LDRcp), ResultReg, 0);
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(ARM::LDRcp), ResultReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));
@@ -551,7 +551,7 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
       Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
     else
       Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(Opc), DestReg).addGlobalAddress(GV, 0, TF));
   } else {
     // MachineConstantPool wants an explicit alignment.
@@ -572,7 +572,7 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
     MachineInstrBuilder MIB;
     if (isThumb2) {
       unsigned Opc = IsPositionIndependent ? ARM::t2LDRpci_pic : ARM::t2LDRpci;
-      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
+      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc),
                     DestReg).addConstantPoolIndex(Idx);
       if (IsPositionIndependent)
         MIB.addImm(Id);
@@ -580,7 +580,7 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
     } else {
       // The extra immediate is for addrmode2.
       DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0);
-      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                     TII.get(ARM::LDRcp), DestReg)
                .addConstantPoolIndex(Idx)
                .addImm(0);
@@ -591,7 +591,7 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
       Register NewDestReg = createResultReg(TLI.getRegClassFor(VT));
       MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                        DbgLoc, TII.get(Opc), NewDestReg)
+                                        MIMD, TII.get(Opc), NewDestReg)
                                     .addReg(DestReg)
                                     .addImm(Id);
       AddOptionalDefs(MIB);
@@ -605,12 +605,12 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
     MachineInstrBuilder MIB;
     Register NewDestReg = createResultReg(TLI.getRegClassFor(VT));
     if (isThumb2)
-      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                     TII.get(ARM::t2LDRi12), NewDestReg)
                .addReg(DestReg)
                .addImm(0);
     else
-      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                     TII.get(ARM::LDRi12), NewDestReg)
                .addReg(DestReg)
                .addImm(0);
@@ -658,7 +658,7 @@ unsigned ARMFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
     Register ResultReg = createResultReg(RC);
     ResultReg = constrainOperandRegClass(TII.get(Opc), ResultReg, 0);
 
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
@@ -832,7 +832,7 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
                                             : &ARM::GPRRegClass;
     Register ResultReg = createResultReg(RC);
     unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
@@ -985,7 +985,7 @@ bool ARMFastISel::ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr,
   if (allocReg)
     ResultReg = createResultReg(RC);
   assert(ResultReg > 255 && "Expected an allocated virtual register.");
-  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                     TII.get(Opc), ResultReg);
   AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);
 
@@ -993,7 +993,7 @@ bool ARMFastISel::ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr,
   // load.  Now we must move from the GRP to the FP register.
   if (needVMOV) {
     Register MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(ARM::VMOVSR), MoveReg)
                    .addReg(ResultReg));
     ResultReg = MoveReg;
@@ -1049,7 +1049,7 @@ bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                                                : &ARM::GPRRegClass);
       unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
       SrcReg = constrainOperandRegClass(TII.get(Opc), SrcReg, 1);
-      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                              TII.get(Opc), Res)
                      .addReg(SrcReg).addImm(1));
       SrcReg = Res;
@@ -1099,7 +1099,7 @@ bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
       // Unaligned stores need special handling. Floats require word-alignment.
       if (Alignment && *Alignment < Align(4)) {
         Register MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
-        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                TII.get(ARM::VMOVRS), MoveReg)
                        .addReg(SrcReg));
         SrcReg = MoveReg;
@@ -1125,7 +1125,7 @@ bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
 
   // Create the base instruction, then add the operands.
   SrcReg = constrainOperandRegClass(TII.get(StrOpc), SrcReg, 0);
-  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                     TII.get(StrOpc))
                                 .addReg(SrcReg);
   AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
@@ -1250,7 +1250,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
         return false;
 
       unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(BrOpc))
      .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
       finishCondBranch(BI->getParent(), TBB, FBB);
       return true;
@@ -1262,7 +1262,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
       unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
       Register OpReg = getRegForValue(TI->getOperand(0));
       OpReg = constrainOperandRegClass(TII.get(TstOpc), OpReg, 0);
-      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                               TII.get(TstOpc))
                       .addReg(OpReg).addImm(1));
 
@@ -1273,7 +1273,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
       }
 
       unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(BrOpc))
      .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
 
       finishCondBranch(BI->getParent(), TBB, FBB);
@@ -1283,7 +1283,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
              dyn_cast<ConstantInt>(BI->getCondition())) {
     uint64_t Imm = CI->getZExtValue();
     MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
-    fastEmitBranch(Target, DbgLoc);
+    fastEmitBranch(Target, MIMD.getDL());
     return true;
   }
 
@@ -1300,7 +1300,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
   unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
   CmpReg = constrainOperandRegClass(TII.get(TstOpc), CmpReg, 0);
   AddOptionalDefs(
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc))
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TstOpc))
          .addReg(CmpReg)
          .addImm(1));
 
@@ -1311,7 +1311,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
   }
 
   unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(BrOpc))
      .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
   finishCondBranch(BI->getParent(), TBB, FBB);
   return true;
@@ -1324,7 +1324,7 @@ bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
   unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
ARM::tBRIND : ARM::BX; assert(isThumb2 || Subtarget->hasV4TOps()); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc)).addReg(AddrReg)); const IndirectBrInst *IB = cast<IndirectBrInst>(I); @@ -1432,11 +1432,11 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value, SrcReg1 = constrainOperandRegClass(II, SrcReg1, 0); if (!UseImm) { SrcReg2 = constrainOperandRegClass(II, SrcReg2, 1); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II) .addReg(SrcReg1).addReg(SrcReg2)); } else { MachineInstrBuilder MIB; - MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II) .addReg(SrcReg1); // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0. @@ -1448,7 +1448,7 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value, // For floating point we need to move the result to a comparison register // that we can then use for branches. if (Ty->isFloatTy() || Ty->isDoubleTy()) - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(ARM::FMSTAT))); return true; } @@ -1475,7 +1475,7 @@ bool ARMFastISel::SelectCmp(const Instruction *I) { Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0); unsigned ZeroReg = fastMaterializeConstant(Zero); // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR. - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc), DestReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovCCOpc), DestReg) .addReg(ZeroReg).addImm(1) .addImm(ARMPred).addReg(ARM::CPSR); @@ -1495,7 +1495,7 @@ bool ARMFastISel::SelectFPExt(const Instruction *I) { if (Op == 0) return false; Register Result = createResultReg(&ARM::DPRRegClass); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(ARM::VCVTDS), Result) .addReg(Op)); updateValueMap(I, Result); @@ -1514,7 +1514,7 @@ bool ARMFastISel::SelectFPTrunc(const Instruction *I) { if (Op == 0) return false; Register Result = createResultReg(&ARM::SPRRegClass); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(ARM::VCVTSD), Result) .addReg(Op)); updateValueMap(I, Result); @@ -1560,7 +1560,7 @@ bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) { else return false; Register ResultReg = createResultReg(TLI.getRegClassFor(DstVT)); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg).addReg(FP)); updateValueMap(I, ResultReg); return true; @@ -1587,7 +1587,7 @@ bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) { // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg. Register ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32)); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg).addReg(Op)); // This result needs to be in an integer register, but the conversion only @@ -1636,7 +1636,7 @@ bool ARMFastISel::SelectSelect(const Instruction *I) { unsigned TstOpc = isThumb2 ?
ARM::t2TSTri : ARM::TSTri; CondReg = constrainOperandRegClass(TII.get(TstOpc), CondReg, 0); AddOptionalDefs( - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TstOpc)) .addReg(CondReg) .addImm(1)); @@ -1656,7 +1656,7 @@ bool ARMFastISel::SelectSelect(const Instruction *I) { if (!UseImm) { Op2Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op2Reg, 1); Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 2); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovCCOpc), ResultReg) .addReg(Op2Reg) .addReg(Op1Reg) @@ -1664,7 +1664,7 @@ bool ARMFastISel::SelectSelect(const Instruction *I) { .addReg(ARM::CPSR); } else { Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 1); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovCCOpc), ResultReg) .addReg(Op1Reg) .addImm(Imm) @@ -1766,7 +1766,7 @@ bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) { Register ResultReg = createResultReg(&ARM::GPRnopcRegClass); SrcReg1 = constrainOperandRegClass(TII.get(Opc), SrcReg1, 1); SrcReg2 = constrainOperandRegClass(TII.get(Opc), SrcReg2, 2); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addReg(SrcReg1).addReg(SrcReg2)); updateValueMap(I, ResultReg); @@ -1813,7 +1813,7 @@ bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) { if (Op2 == 0) return false; Register ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy)); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addReg(Op1).addReg(Op2)); updateValueMap(I, ResultReg); @@ -1932,7 +1932,7 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args, // Issue CALLSEQ_START unsigned AdjStackDown = TII.getCallFrameSetupOpcode(); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackDown)) .addImm(NumBytes).addImm(0)); @@ -1977,7 +1977,7 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args, // Now copy/store arg to correct locations.
if (VA.isRegLoc() && !VA.needsCustom()) { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(Arg); RegArgs.push_back(VA.getLocReg()); } else if (VA.needsCustom()) { @@ -1991,7 +1991,7 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args, assert(VA.isRegLoc() && NextVA.isRegLoc() && "We only handle register args!"); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(ARM::VMOVRRD), VA.getLocReg()) .addReg(NextVA.getLocReg(), RegState::Define) .addReg(Arg)); @@ -2023,7 +2023,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<Register> &UsedRegs, unsigned &NumBytes, bool isVarArg) { // Issue CALLSEQ_END unsigned AdjStackUp = TII.getCallFrameDestroyOpcode(); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackUp)) .addImm(NumBytes).addImm(-1ULL)); @@ -2040,7 +2040,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<Register> &UsedRegs, MVT DestVT = RVLocs[0].getValVT(); const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT); Register ResultReg = createResultReg(DstRC); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(ARM::VMOVDRR), ResultReg) .addReg(RVLocs[0].getLocReg()) .addReg(RVLocs[1].getLocReg())); @@ -2061,7 +2061,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<Register> &UsedRegs, const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT); Register ResultReg = createResultReg(DstRC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), ResultReg).addReg(RVLocs[0].getLocReg()); UsedRegs.push_back(RVLocs[0].getLocReg()); @@ -2147,7 +2147,7 @@ bool ARMFastISel::SelectRet(const Instruction *I) { // Avoid a cross-class copy. This is very unlikely. if (!SrcRC->contains(DstReg)) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg); // Add register to return instruction. @@ -2163,7 +2163,7 @@ bool ARMFastISel::SelectRet(const Instruction *I) { else RetOpc = Subtarget->getReturnOpcode(); - MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(RetOpc)); AddOptionalDefs(MIB); for (unsigned R : RetRegs) @@ -2261,7 +2261,7 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) { // Issue the call. unsigned CallOpc = ARMSelectCallOp(Subtarget->genLongCalls()); MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, - DbgLoc, TII.get(CallOpc)); + MIMD, TII.get(CallOpc)); // BL / BLX don't take a predicate, but tBL / tBLX do. if (isThumb2) MIB.add(predOps(ARMCC::AL)); @@ -2402,7 +2402,7 @@ bool ARMFastISel::SelectCall(const Instruction *I, // Issue the call. unsigned CallOpc = ARMSelectCallOp(UseReg); MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, - DbgLoc, TII.get(CallOpc)); + MIMD, TII.get(CallOpc)); // ARM calls don't take a predicate, but tBL / tBLX do.
if(isThumb2) @@ -2508,7 +2508,7 @@ bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) { unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue(); while (Depth--) { DestReg = createResultReg(RC); - AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(LdrOpc), DestReg) .addReg(SrcReg).addImm(0)); SrcReg = DestReg; @@ -2571,7 +2571,7 @@ bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) { Opcode = ARM::tTRAP; else Opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opcode)); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opcode)); return true; } } @@ -2723,7 +2723,7 @@ unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned ImmEnc = ImmIsSO ? ARM_AM::getSORegOpc(ShiftAM, Imm) : Imm; bool isKill = 1 == Instr; MachineInstrBuilder MIB = BuildMI( - *FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opcode), ResultReg); + *FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opcode), ResultReg); if (setsCPSR) MIB.addReg(ARM::CPSR, RegState::Define); SrcReg = constrainOperandRegClass(TII.get(Opcode), SrcReg, 1 + setsCPSR); @@ -2803,7 +2803,7 @@ bool ARMFastISel::SelectShift(const Instruction *I, Register ResultReg = createResultReg(&ARM::GPRnopcRegClass); if(ResultReg == 0) return false; - MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addReg(Reg1); @@ -2970,7 +2970,7 @@ unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, MVT VT) { Register TempReg = MF->getRegInfo().createVirtualRegister(&ARM::rGPRRegClass); unsigned Opc = isThumb2 ? ARM::t2LDRpci : ARM::LDRcp; MachineInstrBuilder MIB = - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), TempReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), TempReg) .addConstantPoolIndex(Idx) .addMemOperand(CPMMO); if (Opc == ARM::LDRcp) @@ -2982,7 +2982,7 @@ unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, MVT VT) { Opc = Subtarget->isThumb() ? ARM::tPICADD : UseGOT_PREL ? ARM::PICLDR : ARM::PICADD; DestReg = constrainOperandRegClass(TII.get(Opc), DestReg, 0); - MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg) + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg) .addReg(TempReg) .addImm(ARMPCLabelIndex); @@ -2991,7 +2991,7 @@ unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, MVT VT) { if (UseGOT_PREL && Subtarget->isThumb()) { Register NewDestReg = createResultReg(TLI.getRegClassFor(VT)); - MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(ARM::t2LDRi12), NewDestReg) .addReg(DestReg) .addImm(0); @@ -3065,7 +3065,7 @@ bool ARMFastISel::fastLowerArguments() { // Without this, EmitLiveInCopies may eliminate the livein if its only // use is a bitcast (which isn't turned into an instruction).
Register ResultReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), ResultReg).addReg(DstReg, getKillRegState(true)); updateValueMap(&Arg, ResultReg); diff --git a/llvm/lib/Target/Mips/MipsFastISel.cpp b/llvm/lib/Target/Mips/MipsFastISel.cpp index c1b8af7..7533c12 100644 --- a/llvm/lib/Target/Mips/MipsFastISel.cpp +++ b/llvm/lib/Target/Mips/MipsFastISel.cpp @@ -204,11 +204,11 @@ private: unsigned materializeExternalCallSym(MCSymbol *Syn); MachineInstrBuilder emitInst(unsigned Opc) { - return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)); + return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc)); } MachineInstrBuilder emitInst(unsigned Opc, unsigned DstReg) { - return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), + return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DstReg); } @@ -338,7 +338,7 @@ unsigned MipsFastISel::fastMaterializeAlloca(const AllocaInst *AI) { if (SI != FuncInfo.StaticAllocaMap.end()) { Register ResultReg = createResultReg(&Mips::GPR32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::LEA_ADDiu), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Mips::LEA_ADDiu), ResultReg) .addFrameIndex(SI->second) .addImm(0); @@ -794,7 +794,7 @@ bool MipsFastISel::emitLoad(MVT VT, unsigned &ResultReg, Address &Addr) { MachineMemOperand *MMO = MF->getMachineMemOperand( MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align(4)); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addFrameIndex(FI) .addImm(Offset) .addMemOperand(MMO); @@ -843,7 +843,7 @@ bool MipsFastISel::emitStore(MVT VT, unsigned SrcReg, Address &Addr) { MachineMemOperand *MMO = MF->getMachineMemOperand( MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align(4)); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc)) .addReg(SrcReg) .addFrameIndex(FI) .addImm(Offset) @@ -967,7 +967,7 @@ bool MipsFastISel::selectBranch(const Instruction *I) { return false; } - BuildMI(*BrBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::BGTZ)) + BuildMI(*BrBB, FuncInfo.InsertPt, MIMD, TII.get(Mips::BGTZ)) .addReg(ZExtCondReg) .addMBB(TBB); finishCondBranch(BI->getParent(), TBB, FBB); @@ -1221,7 +1221,7 @@ bool MipsFastISel::processCallArgs(CallLoweringInfo &CLI, // Now copy/store arg to correct locations. if (VA.isRegLoc() && !VA.needsCustom()) { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg); CLI.OutRegs.push_back(VA.getLocReg()); } else if (VA.needsCustom()) { @@ -1291,7 +1291,7 @@ bool MipsFastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT, Register ResultReg = createResultReg(TLI.getRegClassFor(CopyVT)); if (!ResultReg) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), ResultReg).addReg(RVLocs[0].getLocReg()); CLI.InRegs.push_back(RVLocs[0].getLocReg()); @@ -1461,7 +1461,7 @@ bool MipsFastISel::fastLowerArguments() { // Without this, EmitLiveInCopies may eliminate the livein if its only // use is a bitcast (which isn't turned into an instruction). 
Register ResultReg = createResultReg(Allocation[ArgNo].RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), ResultReg) .addReg(DstReg, getKillRegState(true)); updateValueMap(&FormalArg, ResultReg); @@ -1550,7 +1550,7 @@ bool MipsFastISel::fastLowerCall(CallLoweringInfo &CLI) { DestAddress = materializeGV(Addr.getGlobalValue(), MVT::i32); emitInst(TargetOpcode::COPY, Mips::T9).addReg(DestAddress); MachineInstrBuilder MIB = - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::JALR), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Mips::JALR), Mips::RA).addReg(Mips::T9); // Add implicit physical register uses to the call. @@ -1756,7 +1756,7 @@ bool MipsFastISel::selectRet(const Instruction *I) { } // Make the copy. - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg); // Add register to return instruction. @@ -2127,7 +2127,7 @@ unsigned MipsFastISel::fastEmitInst_rr(unsigned MachineInstOpcode, const MCInstrDesc &II = TII.get(MachineInstOpcode); Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg) .addReg(Op0) .addReg(Op1) .addReg(Mips::HI0, RegState::ImplicitDefine | RegState::Dead) diff --git a/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/llvm/lib/Target/PowerPC/PPCFastISel.cpp index e8d54b4..0225e67 100644 --- a/llvm/lib/Target/PowerPC/PPCFastISel.cpp +++ b/llvm/lib/Target/PowerPC/PPCFastISel.cpp @@ -151,7 +151,7 @@ class PPCFastISel final : public FastISel { unsigned SrcReg, unsigned Flag = 0, unsigned SubReg = 0) { Register TmpReg = createResultReg(ToRC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), TmpReg).addReg(SrcReg, Flag, SubReg); return TmpReg; } @@ -429,7 +429,7 @@ void PPCFastISel::PPCSimplifyAddress(Address &Addr, bool &UseOffset, // register and continue. This should almost never happen. if (!UseOffset && Addr.BaseType == Address::FrameIndexBase) { Register ResultReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDI8), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::ADDI8), ResultReg).addFrameIndex(Addr.Base.FI).addImm(0); Addr.Base.Reg = ResultReg; Addr.BaseType = Address::RegBase; @@ -533,7 +533,7 @@ bool PPCFastISel::PPCEmitLoad(MVT VT, Register &ResultReg, Address &Addr, MachineMemOperand::MOLoad, MFI.getObjectSize(Addr.Base.FI), MFI.getObjectAlign(Addr.Base.FI)); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addImm(Addr.Offset).addFrameIndex(Addr.Base.FI).addMemOperand(MMO); // Base reg with offset in range. @@ -541,7 +541,7 @@ bool PPCFastISel::PPCEmitLoad(MVT VT, Register &ResultReg, Address &Addr, // VSX only provides an indexed load. if (Is32VSXLoad || Is64VSXLoad) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addImm(Addr.Offset).addReg(Addr.Base.Reg); // Indexed form. 
@@ -568,7 +568,7 @@ bool PPCFastISel::PPCEmitLoad(MVT VT, Register &ResultReg, Address &Addr, case PPC::SPELWZ: Opc = PPC::SPELWZX; break; } - auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), + auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg); // If we have an index register defined we use it in the store inst, @@ -679,7 +679,7 @@ bool PPCFastISel::PPCEmitStore(MVT VT, unsigned SrcReg, Address &Addr) { MachineMemOperand::MOStore, MFI.getObjectSize(Addr.Base.FI), MFI.getObjectAlign(Addr.Base.FI)); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc)) .addReg(SrcReg) .addImm(Addr.Offset) .addFrameIndex(Addr.Base.FI) @@ -691,7 +691,7 @@ bool PPCFastISel::PPCEmitStore(MVT VT, unsigned SrcReg, Address &Addr) { if (Is32VSXStore || Is64VSXStore) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc)) .addReg(SrcReg).addImm(Addr.Offset).addReg(Addr.Base.Reg); // Indexed form. @@ -714,7 +714,7 @@ bool PPCFastISel::PPCEmitStore(MVT VT, unsigned SrcReg, Address &Addr) { case PPC::SPESTW: Opc = PPC::SPESTWX; break; } - auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)) + auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc)) .addReg(SrcReg); // If we have an index register defined we use it in the store inst, @@ -788,7 +788,7 @@ bool PPCFastISel::SelectBranch(const Instruction *I) { CondReg, PPCPred)) return false; - BuildMI(*BrBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::BCC)) + BuildMI(*BrBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::BCC)) .addImm(Subtarget->hasSPE() ? PPC::PRED_SPE : PPCPred) .addReg(CondReg) .addMBB(TBB); @@ -799,7 +799,7 @@ bool PPCFastISel::SelectBranch(const Instruction *I) { dyn_cast<ConstantInt>(BI->getCondition())) { uint64_t Imm = CI->getZExtValue(); MachineBasicBlock *Target = (Imm == 0) ?
FBB : TBB; - fastEmitBranch(Target, DbgLoc); + fastEmitBranch(Target, MIMD.getDL()); return true; } @@ -942,10 +942,10 @@ bool PPCFastISel::PPCEmitCmp(const Value *SrcValue1, const Value *SrcValue2, } if (!UseImm) - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc), DestReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CmpOpc), DestReg) .addReg(SrcReg1).addReg(SrcReg2); else - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc), DestReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CmpOpc), DestReg) .addReg(SrcReg1).addImm(Imm); return true; @@ -987,18 +987,18 @@ bool PPCFastISel::SelectFPTrunc(const Instruction *I) { auto RC = MRI.getRegClass(SrcReg); if (Subtarget->hasSPE()) { DestReg = createResultReg(&PPC::GPRCRegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::EFSCFD), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::EFSCFD), DestReg) .addReg(SrcReg); } else if (Subtarget->hasP8Vector() && isVSFRCRegClass(RC)) { DestReg = createResultReg(&PPC::VSSRCRegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::XSRSP), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::XSRSP), DestReg) .addReg(SrcReg); } else { SrcReg = copyRegToRegClass(&PPC::F8RCRegClass, SrcReg); DestReg = createResultReg(&PPC::F4RCRegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::FRSP), DestReg) .addReg(SrcReg); } @@ -1093,7 +1093,7 @@ bool PPCFastISel::SelectIToFP(const Instruction *I, bool IsSigned) { Register DestReg = createResultReg(&PPC::SPERCRegClass); // Generate the convert. - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg) .addReg(SrcReg); updateValueMap(I, DestReg); return true; @@ -1137,7 +1137,7 @@ bool PPCFastISel::SelectIToFP(const Instruction *I, bool IsSigned) { Opc = IsSigned ? PPC::FCFID : PPC::FCFIDU; // Generate the convert. - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg) .addReg(FPReg); updateValueMap(I, DestReg); @@ -1248,7 +1248,7 @@ bool PPCFastISel::SelectFPToI(const Instruction *I, bool IsSigned) { } // Generate the convert. - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg) .addReg(SrcReg); // Now move the integer value from a float register to an integer register. @@ -1344,7 +1344,7 @@ bool PPCFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) { } if (UseImm) { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addReg(SrcReg1) .addImm(Imm); @@ -1362,7 +1362,7 @@ bool PPCFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) { if (ISDOpcode == ISD::SUB) std::swap(SrcReg1, SrcReg2); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addReg(SrcReg1).addReg(SrcReg2); updateValueMap(I, ResultReg); return true; @@ -1415,7 +1415,7 @@ bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args, NumBytes = std::max(NumBytes, LinkageSize + 64); // Issue CALLSEQ_START.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TII.getCallFrameSetupOpcode())) .addImm(NumBytes).addImm(0); @@ -1476,7 +1476,7 @@ bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args, } else ArgReg = NextGPR++; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), ArgReg).addReg(Arg); RegArgs.push_back(ArgReg); } @@ -1490,7 +1490,7 @@ bool PPCFastISel::finishCall(MVT RetVT, CallLoweringInfo &CLI, unsigned &NumByte CallingConv::ID CC = CLI.CallConv; // Issue CallSEQ_END. - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TII.getCallFrameDestroyOpcode())) .addImm(NumBytes).addImm(0); @@ -1523,7 +1523,7 @@ bool PPCFastISel::finishCall(MVT RetVT, CallLoweringInfo &CLI, unsigned &NumByte // If necessary, round the floating result to single precision. } else if (CopyVT == MVT::f64) { ResultReg = createResultReg(TLI.getRegClassFor(RetVT)); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::FRSP), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::FRSP), ResultReg).addReg(SourcePhysReg); // If only the low half of a general register is needed, generate @@ -1657,13 +1657,13 @@ bool PPCFastISel::fastLowerCall(CallLoweringInfo &CLI) { // the call we generate here will be erased by FastISel::selectPatchpoint, // so don't try very hard... if (CLI.IsPatchPoint) - MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::NOP)); + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::NOP)); else return false; } else { // Build direct call with NOP for TOC restore. // FIXME: We can and should optimize away the NOP for local calls. - MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::BL8_NOP)); // Add callee. MIB.addGlobalAddress(GV); @@ -1728,7 +1728,7 @@ bool PPCFastISel::SelectRet(const Instruction *I) { unsigned SrcReg = PPCMaterializeInt(CI, MVT::i64, VA.getLocInfo() != CCValAssign::ZExt); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), RetReg).addReg(SrcReg); RetRegs.push_back(RetReg); @@ -1785,14 +1785,14 @@ bool PPCFastISel::SelectRet(const Instruction *I) { } } - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), RetRegs[i]) .addReg(SrcReg); } } } - MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::BLR8)); for (unsigned i = 0, e = RetRegs.size(); i != e; ++i) @@ -1822,7 +1822,7 @@ bool PPCFastISel::PPCEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, assert(DestVT == MVT::i64 && "Signed extend from i32 to i32??"); Opc = PPC::EXTSW_32_64; } - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg) .addReg(SrcReg); // Unsigned 32-bit extensions use RLWINM.
@@ -1834,7 +1834,7 @@ bool PPCFastISel::PPCEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, assert(SrcVT == MVT::i16 && "Unsigned extend from i32 to i32??"); MB = 16; } - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::RLWINM), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::RLWINM), DestReg) .addReg(SrcReg).addImm(/*SH=*/0).addImm(MB).addImm(/*ME=*/31); @@ -1847,7 +1847,7 @@ bool PPCFastISel::PPCEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, MB = 48; else MB = 32; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::RLDICL_32_64), DestReg) .addReg(SrcReg).addImm(/*SH=*/0).addImm(MB); } @@ -1861,9 +1861,9 @@ bool PPCFastISel::SelectIndirectBr(const Instruction *I) { if (AddrReg == 0) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::MTCTR8)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::MTCTR8)) .addReg(AddrReg); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::BCTR8)); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::BCTR8)); const IndirectBrInst *IB = cast<IndirectBrInst>(I); for (const BasicBlock *SuccBB : IB->successors()) @@ -2022,26 +2022,26 @@ unsigned PPCFastISel::PPCMaterializeFP(const ConstantFP *CFP, MVT VT) { PPCFuncInfo->setUsesTOCBasePtr(); // For small code model, generate a LF[SD](0, LDtocCPT(Idx, X2)). if (CModel == CodeModel::Small) { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtocCPT), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::LDtocCPT), TmpReg) .addConstantPoolIndex(Idx).addReg(PPC::X2); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg) .addImm(0).addReg(TmpReg).addMemOperand(MMO); } else { // Otherwise we generate LF[SD](Idx[lo], ADDIStocHA8(X2, Idx)). - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDIStocHA8), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::ADDIStocHA8), TmpReg).addReg(PPC::X2).addConstantPoolIndex(Idx); // But for large code model, we must generate a LDtocL followed // by the LF[SD]. if (CModel == CodeModel::Large) { Register TmpReg2 = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtocL), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::LDtocL), TmpReg2).addConstantPoolIndex(Idx).addReg(TmpReg); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg) .addImm(0) .addReg(TmpReg2); } else - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg) .addConstantPoolIndex(Idx, 0, PPCII::MO_TOC_LO) .addReg(TmpReg) .addMemOperand(MMO); @@ -2083,7 +2083,7 @@ unsigned PPCFastISel::PPCMaterializeGV(const GlobalValue *GV, MVT VT) { PPCFuncInfo->setUsesTOCBasePtr(); // For small code model, generate a simple TOC load.
if (CModel == CodeModel::Small) - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtoc), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::LDtoc), DestReg) .addGlobalAddress(GV) .addReg(PPC::X2); @@ -2097,15 +2097,15 @@ unsigned PPCFastISel::PPCMaterializeGV(const GlobalValue *GV, MVT VT) { // ADDItocL(ADDIStocHA8(%x2, GV), GV) // Either way, start with the ADDIStocHA8: Register HighPartReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDIStocHA8), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::ADDIStocHA8), HighPartReg).addReg(PPC::X2).addGlobalAddress(GV); if (Subtarget->isGVIndirectSymbol(GV)) { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtocL), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::LDtocL), DestReg).addGlobalAddress(GV).addReg(HighPartReg); } else { // Otherwise generate the ADDItocL. - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDItocL), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::ADDItocL), DestReg).addReg(HighPartReg).addGlobalAddress(GV); } } @@ -2124,21 +2124,21 @@ unsigned PPCFastISel::PPCMaterialize32BitInt(int64_t Imm, bool IsGPRC = RC->hasSuperClassEq(&PPC::GPRCRegClass); if (isInt<16>(Imm)) - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(IsGPRC ? PPC::LI : PPC::LI8), ResultReg) .addImm(Imm); else if (Lo) { // Both Lo and Hi have nonzero bits. Register TmpReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(IsGPRC ? PPC::LIS : PPC::LIS8), TmpReg) .addImm(Hi); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(IsGPRC ? PPC::ORI : PPC::ORI8), ResultReg) .addReg(TmpReg).addImm(Lo); } else // Just Hi bits. - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(IsGPRC ? PPC::LIS : PPC::LIS8), ResultReg) .addImm(Hi); @@ -2178,7 +2178,7 @@ unsigned PPCFastISel::PPCMaterialize64BitInt(int64_t Imm, unsigned TmpReg2; if (Imm) { TmpReg2 = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::RLDICR), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::RLDICR), TmpReg2).addReg(TmpReg1).addImm(Shift).addImm(63 - Shift); } else TmpReg2 = TmpReg1; @@ -2186,14 +2186,14 @@ unsigned PPCFastISel::PPCMaterialize64BitInt(int64_t Imm, unsigned TmpReg3, Hi, Lo; if ((Hi = (Remainder >> 16) & 0xFFFF)) { TmpReg3 = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ORIS8), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::ORIS8), TmpReg3).addReg(TmpReg2).addImm(Hi); } else TmpReg3 = TmpReg2; if ((Lo = Remainder & 0xFFFF)) { Register ResultReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ORI8), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::ORI8), ResultReg).addReg(TmpReg3).addImm(Lo); return ResultReg; } @@ -2209,7 +2209,7 @@ unsigned PPCFastISel::PPCMaterializeInt(const ConstantInt *CI, MVT VT, // case first. if (VT == MVT::i1 && Subtarget->useCRBits()) { Register ImmReg = createResultReg(&PPC::CRBITRCRegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CI->isZero() ? 
PPC::CRUNSET : PPC::CRSET), ImmReg); return ImmReg; } @@ -2229,7 +2229,7 @@ unsigned PPCFastISel::PPCMaterializeInt(const ConstantInt *CI, MVT VT, if (isInt<16>(Imm)) { unsigned Opc = (VT == MVT::i64) ? PPC::LI8 : PPC::LI; Register ImmReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ImmReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ImmReg) .addImm(Imm); return ImmReg; } @@ -2281,7 +2281,7 @@ unsigned PPCFastISel::fastMaterializeAlloca(const AllocaInst *AI) { if (SI != FuncInfo.StaticAllocaMap.end()) { Register ResultReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDI8), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::ADDI8), ResultReg).addFrameIndex(SI->second).addImm(0); return ResultReg; } @@ -2391,7 +2391,7 @@ unsigned PPCFastISel::fastEmit_i(MVT Ty, MVT VT, unsigned Opc, uint64_t Imm) { // case first. if (VT == MVT::i1 && Subtarget->useCRBits()) { Register ImmReg = createResultReg(&PPC::CRBITRCRegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Imm == 0 ? PPC::CRUNSET : PPC::CRSET), ImmReg); return ImmReg; } diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp index c21eee8..1bb2d6b 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp @@ -388,7 +388,7 @@ void WebAssemblyFastISel::materializeLoadStoreOperands(Address &Addr) { : &WebAssembly::I32RegClass); unsigned Opc = Subtarget->hasAddr64() ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), Reg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), Reg) .addImm(0); Addr.setReg(Reg); } @@ -460,12 +460,12 @@ unsigned WebAssemblyFastISel::zeroExtendToI32(unsigned Reg, const Value *V, } Register Imm = createResultReg(&WebAssembly::I32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::CONST_I32), Imm) .addImm(~(~uint64_t(0) << MVT(From).getSizeInBits())); Register Result = createResultReg(&WebAssembly::I32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::AND_I32), Result) .addReg(Reg) .addReg(Imm); @@ -490,18 +490,18 @@ unsigned WebAssemblyFastISel::signExtendToI32(unsigned Reg, const Value *V, } Register Imm = createResultReg(&WebAssembly::I32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::CONST_I32), Imm) .addImm(32 - MVT(From).getSizeInBits()); Register Left = createResultReg(&WebAssembly::I32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::SHL_I32), Left) .addReg(Reg) .addReg(Imm); Register Right = createResultReg(&WebAssembly::I32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::SHR_S_I32), Right) .addReg(Left) .addReg(Imm); @@ -519,7 +519,7 @@ unsigned WebAssemblyFastISel::zeroExtend(unsigned Reg, const Value *V, Reg = zeroExtendToI32(Reg, V, From); Register Result = createResultReg(&WebAssembly::I64RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, 
FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::I64_EXTEND_U_I32), Result) .addReg(Reg); return Result; @@ -541,7 +541,7 @@ unsigned WebAssemblyFastISel::signExtend(unsigned Reg, const Value *V, Reg = signExtendToI32(Reg, V, From); Register Result = createResultReg(&WebAssembly::I64RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::I64_EXTEND_S_I32), Result) .addReg(Reg); return Result; @@ -580,7 +580,7 @@ unsigned WebAssemblyFastISel::notValue(unsigned Reg) { assert(MRI.getRegClass(Reg) == &WebAssembly::I32RegClass); Register NotReg = createResultReg(&WebAssembly::I32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::EQZ_I32), NotReg) .addReg(Reg); return NotReg; @@ -588,7 +588,7 @@ unsigned WebAssemblyFastISel::notValue(unsigned Reg) { unsigned WebAssemblyFastISel::copyValue(unsigned Reg) { Register ResultReg = createResultReg(MRI.getRegClass(Reg)); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(WebAssembly::COPY), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::COPY), ResultReg) .addReg(Reg); return ResultReg; @@ -604,7 +604,7 @@ unsigned WebAssemblyFastISel::fastMaterializeAlloca(const AllocaInst *AI) { : &WebAssembly::I32RegClass); unsigned Opc = Subtarget->hasAddr64() ? WebAssembly::COPY_I64 : WebAssembly::COPY_I32; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addFrameIndex(SI->second); return ResultReg; } @@ -623,7 +623,7 @@ unsigned WebAssemblyFastISel::fastMaterializeConstant(const Constant *C) { : &WebAssembly::I32RegClass); unsigned Opc = Subtarget->hasAddr64() ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addGlobalAddress(GV); return ResultReg; } @@ -717,7 +717,7 @@ bool WebAssemblyFastISel::fastLowerArguments() { return false; } Register ResultReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addImm(I); updateValueMap(&Arg, ResultReg); @@ -859,7 +859,7 @@ bool WebAssemblyFastISel::selectCall(const Instruction *I) { return false; } - auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)); + auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc)); if (!IsVoid) MIB.addReg(ResultReg, RegState::Define); @@ -886,7 +886,7 @@ bool WebAssemblyFastISel::selectCall(const Instruction *I) { // as 64-bit for uniformity with other pointer types. 
// See also: WebAssemblyISelLowering.cpp: LowerCallResults if (Subtarget->hasAddr64()) { - auto Wrap = BuildMI(*FuncInfo.MBB, std::prev(FuncInfo.InsertPt), DbgLoc, + auto Wrap = BuildMI(*FuncInfo.MBB, std::prev(FuncInfo.InsertPt), MIMD, TII.get(WebAssembly::I32_WRAP_I64)); Register Reg32 = createResultReg(&WebAssembly::I32RegClass); Wrap.addReg(Reg32, RegState::Define); @@ -961,7 +961,7 @@ bool WebAssemblyFastISel::selectSelect(const Instruction *I) { } Register ResultReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addReg(TrueReg) .addReg(FalseReg) .addReg(CondReg); @@ -979,7 +979,7 @@ bool WebAssemblyFastISel::selectTrunc(const Instruction *I) { if (Trunc->getOperand(0)->getType()->isIntegerTy(64)) { Register Result = createResultReg(&WebAssembly::I32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::I32_WRAP_I64), Result) .addReg(Reg); Reg = Result; @@ -1077,7 +1077,7 @@ bool WebAssemblyFastISel::selectICmp(const Instruction *I) { return false; Register ResultReg = createResultReg(&WebAssembly::I32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addReg(LHS) .addReg(RHS); updateValueMap(ICmp, ResultReg); @@ -1138,7 +1138,7 @@ bool WebAssemblyFastISel::selectFCmp(const Instruction *I) { } Register ResultReg = createResultReg(&WebAssembly::I32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addReg(LHS) .addReg(RHS); @@ -1231,7 +1231,7 @@ bool WebAssemblyFastISel::selectLoad(const Instruction *I) { materializeLoadStoreOperands(Addr); Register ResultReg = createResultReg(RC); - auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), + auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg); addLoadStoreOperands(Addr, MIB, createMachineMemOperandFor(Load)); @@ -1291,7 +1291,7 @@ bool WebAssemblyFastISel::selectStore(const Instruction *I) { if (VTIsi1) ValueReg = maskI1Value(ValueReg, Store->getValueOperand()); - auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)); + auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc)); addLoadStoreOperands(Addr, MIB, createMachineMemOperandFor(Store)); @@ -1319,7 +1319,7 @@ bool WebAssemblyFastISel::selectBr(const Instruction *I) { if (Not) Opc = WebAssembly::BR_UNLESS; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc)) .addMBB(TBB) .addReg(CondReg); @@ -1334,7 +1334,7 @@ bool WebAssemblyFastISel::selectRet(const Instruction *I) { const auto *Ret = cast<ReturnInst>(I); if (Ret->getNumOperands() == 0) { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::RETURN)); return true; } @@ -1379,14 +1379,14 @@ bool WebAssemblyFastISel::selectRet(const Instruction *I) { if (Reg == 0) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::RETURN)) .addReg(Reg); return true; } bool WebAssemblyFastISel::selectUnreachable(const Instruction *I) { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB,
FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::UNREACHABLE)); return true; } diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp index f12e978..ba3674e 100644 --- a/llvm/lib/Target/X86/X86FastISel.cpp +++ b/llvm/lib/Target/X86/X86FastISel.cpp @@ -467,7 +467,7 @@ bool X86FastISel::X86FastEmitLoad(MVT VT, X86AddressMode &AM, ResultReg = createResultReg(RC); MachineInstrBuilder MIB = - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg); addFullAddress(MIB, AM); if (MMO) MIB->addMemOperand(*FuncInfo.MF, MMO); @@ -496,7 +496,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, X86AddressMode &AM, case MVT::i1: { // Mask out all but lowest bit. Register AndResult = createResultReg(&X86::GR8RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::AND8ri), AndResult) .addReg(ValReg).addImm(1); ValReg = AndResult; @@ -643,7 +643,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, X86AddressMode &AM, // any bugs before. ValReg = constrainOperandRegClass(Desc, ValReg, Desc.getNumOperands() - 1); MachineInstrBuilder MIB = - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, Desc); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, Desc); addFullAddress(MIB, AM).addReg(ValReg); if (MMO) MIB->addMemOperand(*FuncInfo.MF, MMO); @@ -679,7 +679,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val, if (Opc) { MachineInstrBuilder MIB = - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc)); addFullAddress(MIB, AM).addImm(Signed ? (uint64_t) CI->getSExtValue() : CI->getZExtValue()); if (MMO) @@ -786,7 +786,7 @@ bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) { LoadReg = createResultReg(RC); MachineInstrBuilder LoadMI = - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), LoadReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), LoadReg); addFullAddress(LoadMI, StubAM); // Ok, back to normal mode. @@ -1085,12 +1085,12 @@ bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) { // In 64-bit mode, we need a 64-bit register even if pointers are 32 bits. if (Reg && Subtarget->isTarget64BitILP32()) { Register CopyReg = createResultReg(&X86::GR32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV32rr), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV32rr), CopyReg) .addReg(Reg); Register ExtReg = createResultReg(&X86::GR64RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::SUBREG_TO_REG), ExtReg) .addImm(0) .addReg(CopyReg) @@ -1267,7 +1267,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) { // Avoid a cross-class copy. This is very unlikely. if (!SrcRC->contains(DstReg)) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg); // Add register to return instruction. @@ -1287,7 +1287,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) { assert(Reg && "SRetReturnReg should have been set in LowerFormalArguments()!"); unsigned RetReg = Subtarget->isTarget64BitLP64() ? 
X86::RAX : X86::EAX; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), RetReg).addReg(Reg); RetRegs.push_back(RetReg); } @@ -1295,11 +1295,11 @@ bool X86FastISel::X86SelectRet(const Instruction *I) { // Now emit the RET. MachineInstrBuilder MIB; if (X86MFInfo->getBytesToPopOnReturn()) { - MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Subtarget->is64Bit() ? X86::RETI64 : X86::RETI32)) .addImm(X86MFInfo->getBytesToPopOnReturn()); } else { - MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Subtarget->is64Bit() ? X86::RET64 : X86::RET32)); } for (unsigned i = 0, e = RetRegs.size(); i != e; ++i) @@ -1405,7 +1405,7 @@ static unsigned X86ChooseCmpImmediateOpcode(EVT VT, const ConstantInt *RHSC) { } bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, EVT VT, - const DebugLoc &CurDbgLoc) { + const DebugLoc &CurMIMD) { Register Op0Reg = getRegForValue(Op0); if (Op0Reg == 0) return false; @@ -1418,7 +1418,7 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, EVT VT, // CMPri, otherwise use CMPrr. if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) { if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc, TII.get(CompareImmOpc)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurMIMD, TII.get(CompareImmOpc)) .addReg(Op0Reg) .addImm(Op1C->getSExtValue()); return true; @@ -1430,7 +1430,7 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, EVT VT, Register Op1Reg = getRegForValue(Op1); if (Op1Reg == 0) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc, TII.get(CompareOpc)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurMIMD, TII.get(CompareOpc)) .addReg(Op0Reg) .addReg(Op1Reg); @@ -1455,7 +1455,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) { default: break; case CmpInst::FCMP_FALSE: { ResultReg = createResultReg(&X86::GR32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV32r0), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV32r0), ResultReg); ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, X86::sub_8bit); if (!ResultReg) @@ -1464,7 +1464,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) { } case CmpInst::FCMP_TRUE: { ResultReg = createResultReg(&X86::GR8RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV8ri), ResultReg).addImm(1); break; } @@ -1506,11 +1506,11 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) { Register FlagReg1 = createResultReg(&X86::GR8RegClass); Register FlagReg2 = createResultReg(&X86::GR8RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr), FlagReg1).addImm(SETFOpc[0]); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr), FlagReg2).addImm(SETFOpc[1]); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[2]), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(SETFOpc[2]), ResultReg).addReg(FlagReg1).addReg(FlagReg2); updateValueMap(I, ResultReg); return true; @@ -1528,7 +1528,7 @@ bool X86FastISel::X86SelectCmp(const
Instruction *I) { if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc())) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr), ResultReg).addImm(CC); updateValueMap(I, ResultReg); return true; @@ -1566,18 +1566,18 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) { } Register Result32 = createResultReg(&X86::GR32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovInst), Result32) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovInst), Result32) .addReg(ResultReg); ResultReg = createResultReg(&X86::GR64RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::SUBREG_TO_REG), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg) .addImm(0).addReg(Result32).addImm(X86::sub_32bit); } else if (DstVT == MVT::i16) { // i8->i16 doesn't exist in the autogenerated isel table. Need to zero // extend to 32-bits and then extract down to 16-bits. Register Result32 = createResultReg(&X86::GR32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOVZX32rr8), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOVZX32rr8), Result32).addReg(ResultReg); ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, X86::sub_16bit); @@ -1611,7 +1611,7 @@ bool X86FastISel::X86SelectSExt(const Instruction *I) { // Negate the result to make an 8-bit sign extended value. ResultReg = createResultReg(&X86::GR8RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::NEG8r), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::NEG8r), ResultReg).addReg(ZExtReg); SrcVT = MVT::i8; @@ -1621,7 +1621,7 @@ bool X86FastISel::X86SelectSExt(const Instruction *I) { // i8->i16 doesn't exist in the autogenerated isel table. Need to sign // extend to 32-bits and then extract down to 16-bits. Register Result32 = createResultReg(&X86::GR32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOVSX32rr8), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOVSX32rr8), Result32).addReg(ResultReg); ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, X86::sub_16bit); @@ -1655,8 +1655,8 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { CmpInst::Predicate Predicate = optimizeCmpPredicate(CI); switch (Predicate) { default: break; - case CmpInst::FCMP_FALSE: fastEmitBranch(FalseMBB, DbgLoc); return true; - case CmpInst::FCMP_TRUE: fastEmitBranch(TrueMBB, DbgLoc); return true; + case CmpInst::FCMP_FALSE: fastEmitBranch(FalseMBB, MIMD.getDL()); return true; + case CmpInst::FCMP_TRUE: fastEmitBranch(TrueMBB, MIMD.getDL()); return true; } const Value *CmpLHS = CI->getOperand(0); @@ -1706,13 +1706,13 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { if (!X86FastEmitCompare(CmpLHS, CmpRHS, VT, CI->getDebugLoc())) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1)) .addMBB(TrueMBB).addImm(CC); // X86 requires a second branch to handle UNE (and OEQ, which is mapped // to UNE above). 
if (NeedExtraBranch) { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1)) .addMBB(TrueMBB).addImm(X86::COND_P); } @@ -1737,7 +1737,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { Register OpReg = getRegForValue(TI->getOperand(0)); if (OpReg == 0) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TestOpc)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TestOpc)) .addReg(OpReg).addImm(1); unsigned JmpCond = X86::COND_NE; @@ -1746,7 +1746,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { JmpCond = X86::COND_E; } - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1)) .addMBB(TrueMBB).addImm(JmpCond); finishCondBranch(BI->getParent(), TrueMBB, FalseMBB); @@ -1760,7 +1760,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { if (TmpReg == 0) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1)) .addMBB(TrueMBB).addImm(CC); finishCondBranch(BI->getParent(), TrueMBB, FalseMBB); return true; @@ -1776,15 +1776,15 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { if (MRI.getRegClass(OpReg) == &X86::VK1RegClass) { unsigned KOpReg = OpReg; OpReg = createResultReg(&X86::GR32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), OpReg) .addReg(KOpReg); OpReg = fastEmitInst_extractsubreg(MVT::i8, OpReg, X86::sub_8bit); } - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TEST8ri)) .addReg(OpReg) .addImm(1); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1)) .addMBB(TrueMBB).addImm(X86::COND_NE); finishCondBranch(BI->getParent(), TrueMBB, FalseMBB); return true; @@ -1842,18 +1842,18 @@ bool X86FastISel::X86SelectShift(const Instruction *I) { Register Op1Reg = getRegForValue(I->getOperand(1)); if (Op1Reg == 0) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), CReg).addReg(Op1Reg); // The shift instruction uses X86::CL. If we defined a super-register // of X86::CL, emit a subreg KILL to precisely describe what we're doing here. if (CReg != X86::CL) - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::KILL), X86::CL) .addReg(CReg, RegState::Kill); Register ResultReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(OpReg), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(OpReg), ResultReg) .addReg(Op0Reg); updateValueMap(I, ResultReg); return true; @@ -1954,38 +1954,38 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) { return false; // Move op0 into low-order input register. - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(OpEntry.OpCopy), TypeEntry.LowInReg).addReg(Op0Reg); // Zero-extend or sign-extend into high-order input register. 
   if (OpEntry.OpSignExtend) {
     if (OpEntry.IsOpSigned)
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(OpEntry.OpSignExtend));
     else {
       Register Zero32 = createResultReg(&X86::GR32RegClass);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(X86::MOV32r0), Zero32);
 
       // Copy the zero into the appropriate sub/super/identical physical
      // register. Unfortunately the operations needed are not uniform enough
       // to fit neatly into the table above.
       if (VT == MVT::i16) {
-        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                 TII.get(Copy), TypeEntry.HighInReg)
           .addReg(Zero32, 0, X86::sub_16bit);
       } else if (VT == MVT::i32) {
-        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                 TII.get(Copy), TypeEntry.HighInReg)
             .addReg(Zero32);
       } else if (VT == MVT::i64) {
-        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                 TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
             .addImm(0).addReg(Zero32).addImm(X86::sub_32bit);
       }
     }
   }
   // Generate the DIV/IDIV instruction.
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
           TII.get(OpEntry.OpDivRem)).addReg(Op1Reg);
   // For i8 remainder, we can't reference ah directly, as we'll end
   // up with bogus copies like %r9b = COPY %ah. Reference ax
@@ -2001,11 +2001,11 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
       OpEntry.DivRemResultReg == X86::AH && Subtarget->is64Bit()) {
     Register SourceSuperReg = createResultReg(&X86::GR16RegClass);
     Register ResultSuperReg = createResultReg(&X86::GR16RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(Copy), SourceSuperReg).addReg(X86::AX);
 
     // Shift AX right by 8 bits instead of using AH.
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SHR16ri),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SHR16ri),
             ResultSuperReg).addReg(SourceSuperReg).addImm(8);
 
     // Now reference the 8-bit subreg of the result.
@@ -2015,7 +2015,7 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
   // Copy the result out of the physreg if we haven't already.
   if (!ResultReg) {
     ResultReg = createResultReg(TypeEntry.RC);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Copy), ResultReg)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Copy), ResultReg)
         .addReg(OpEntry.DivRemResultReg);
   }
   updateValueMap(I, ResultReg);
@@ -2081,17 +2081,17 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
   if (SETFOpc) {
     Register FlagReg1 = createResultReg(&X86::GR8RegClass);
     Register FlagReg2 = createResultReg(&X86::GR8RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr),
             FlagReg1).addImm(SETFOpc[0]);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr),
             FlagReg2).addImm(SETFOpc[1]);
     auto const &II = TII.get(SETFOpc[2]);
     if (II.getNumDefs()) {
       Register TmpReg = createResultReg(&X86::GR8RegClass);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, TmpReg)
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, TmpReg)
         .addReg(FlagReg2).addReg(FlagReg1);
     } else {
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
         .addReg(FlagReg2).addReg(FlagReg1);
     }
   }
@@ -2120,12 +2120,12 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
     if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
       unsigned KCondReg = CondReg;
       CondReg = createResultReg(&X86::GR32RegClass);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::COPY), CondReg)
         .addReg(KCondReg);
       CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, X86::sub_8bit);
     }
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TEST8ri))
         .addReg(CondReg)
         .addImm(1);
   }
@@ -2211,7 +2211,7 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
     // Need an IMPLICIT_DEF for the input that is used to generate the upper
     // bits of the result register since its not based on any of the inputs.
     Register ImplicitDefReg = createResultReg(VR128X);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
 
     // Place RHSReg is the passthru of the masked movss/sd operation and put
@@ -2222,7 +2222,7 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
                                           ImplicitDefReg, LHSReg);
 
     ResultReg = createResultReg(RC);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(MovReg);
 
   } else if (Subtarget->hasAVX()) {
@@ -2243,7 +2243,7 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
     Register VBlendReg = fastEmitInst_rrr(BlendOpcode, VR128, RHSReg, LHSReg,
                                           CmpReg);
     ResultReg = createResultReg(RC);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(VBlendReg);
   } else {
     // Choose the SSE instruction sequence based on data type (float or double).
@@ -2265,7 +2265,7 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
     Register AndNReg = fastEmitInst_rr(Opc[2], VR128, CmpReg, RHSReg);
     Register OrReg = fastEmitInst_rr(Opc[3], VR128, AndNReg, AndReg);
     ResultReg = createResultReg(RC);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(OrReg);
   }
   updateValueMap(I, ResultReg);
@@ -2320,12 +2320,12 @@ bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
     if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
       unsigned KCondReg = CondReg;
       CondReg = createResultReg(&X86::GR32RegClass);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::COPY), CondReg)
         .addReg(KCondReg);
       CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, X86::sub_8bit);
     }
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TEST8ri))
        .addReg(CondReg)
        .addImm(1);
   }
@@ -2367,7 +2367,7 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) {
       return false;
     const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
     Register ResultReg = createResultReg(RC);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg)
       .addReg(OpReg);
     updateValueMap(I, ResultReg);
@@ -2437,7 +2437,7 @@ bool X86FastISel::X86SelectIntToFP(const Instruction *I, bool IsSigned) {
   MVT DstVT = TLI.getValueType(DL, I->getType()).getSimpleVT();
   const TargetRegisterClass *RC = TLI.getRegClassFor(DstVT);
   Register ImplicitDefReg = createResultReg(RC);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
           TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
   Register ResultReg = fastEmitInst_rr(Opcode, RC, ImplicitDefReg, OpReg);
   updateValueMap(I, ResultReg);
@@ -2468,14 +2468,14 @@ bool X86FastISel::X86SelectFPExtOrFPTrunc(const Instruction *I,
   unsigned ImplicitDefReg;
   if (HasAVX) {
     ImplicitDefReg = createResultReg(RC);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
 
   }
 
   Register ResultReg = createResultReg(RC);
   MachineInstrBuilder MIB;
-  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpc),
+  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpc),
                 ResultReg);
 
   if (HasAVX)
@@ -2627,7 +2627,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
       Opc = Subtarget->hasAVX512() ? X86::VMOVPDI2DIZrr : X86::VMOVPDI2DIrr;
 
     ResultReg = createResultReg(&X86::GR32RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
        .addReg(InputReg, RegState::Kill);
 
     // The result value is in the lower 16-bits of ResultReg.
@@ -2649,7 +2649,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     // The result value is in the lower 32-bits of ResultReg.
     // Emit an explicit copy from register class VR128 to register class FR32.
     ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(InputReg, RegState::Kill);
   }
@@ -2692,7 +2692,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     // never directly reference the frame register (the TwoAddressInstruction-
     // Pass doesn't like that).
     Register SrcReg = createResultReg(RC);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), SrcReg).addReg(FrameReg);
 
     // Now recursively load from the frame address.
@@ -2703,7 +2703,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue();
     while (Depth--) {
       Register DestReg = createResultReg(RC);
-      addDirectMem(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      addDirectMem(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(Opc), DestReg), SrcReg);
       SrcReg = DestReg;
     }
@@ -2777,16 +2777,16 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     if (!X86SelectAddress(DI->getAddress(), AM))
       return false;
     const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
-    assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
+    assert(DI->getVariable()->isValidLocationForIntrinsic(MIMD.getDL()) &&
           "Expected inlined-at fields to agree");
-    addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II), AM)
+    addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II), AM)
         .addImm(0)
         .addMetadata(DI->getVariable())
         .addMetadata(DI->getExpression());
     return true;
   }
   case Intrinsic::trap: {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TRAP));
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TRAP));
     return true;
   }
   case Intrinsic::sqrt: {
@@ -2827,13 +2827,13 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     unsigned ImplicitDefReg = 0;
     if (AVXLevel > 0) {
       ImplicitDefReg = createResultReg(RC);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
     }
 
     Register ResultReg = createResultReg(RC);
     MachineInstrBuilder MIB;
-    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
+    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc),
                  ResultReg);
 
     if (ImplicitDefReg)
@@ -2907,7 +2907,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
       // We can use INC/DEC.
      ResultReg = createResultReg(TLI.getRegClassFor(VT));
       bool IsDec = BaseOpc == ISD::SUB;
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(Opc[IsDec][VT.SimpleTy-MVT::i8]), ResultReg)
         .addReg(LHSReg);
     } else
@@ -2930,7 +2930,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     static const MCPhysReg Reg[] = { X86::AL, X86::AX, X86::EAX, X86::RAX };
     // First copy the first operand into RAX, which is an implicit input to
     // the X86::MUL*r instruction.
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-MVT::i8])
       .addReg(LHSReg);
     ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
@@ -2941,7 +2941,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     if (VT == MVT::i8) {
       // Copy the first operand into AL, which is an implicit input to the
       // X86::IMUL8r instruction.
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::COPY), X86::AL)
        .addReg(LHSReg);
       ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg);
@@ -2956,7 +2956,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
   // Assign to a GPR since the overflow return value is lowered to a SETcc.
   Register ResultReg2 = createResultReg(&X86::GR8RegClass);
   assert((ResultReg+1) == ResultReg2 && "Nonconsecutive result registers.");
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr),
          ResultReg2).addImm(CondCode);
 
   updateValueMap(II, ResultReg, 2);
@@ -3026,7 +3026,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     return false;
 
   Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
     .addReg(Reg);
 
   updateValueMap(II, ResultReg);
@@ -3125,7 +3125,7 @@ bool X86FastISel::fastLowerArguments() {
     // Without this, EmitLiveInCopies may eliminate the livein if its only
     // use is a bitcast (which isn't turned into an instruction).
     Register ResultReg = createResultReg(RC);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg)
       .addReg(DstReg, getKillRegState(true));
     updateValueMap(&Arg, ResultReg);
@@ -3295,7 +3295,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
 
   // Issue CALLSEQ_START
   unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackDown))
     .addImm(NumBytes).addImm(0).addImm(0);
 
   // Walk the register/memloc assignments, inserting copies/loads.
@@ -3385,7 +3385,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
     }
 
     if (VA.isRegLoc()) {
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
       OutRegs.push_back(VA.getLocReg());
     } else {
@@ -3426,7 +3426,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
   // GOT pointer.
   if (Subtarget->isPICStyleGOT()) {
     unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
           TII.get(TargetOpcode::COPY), X86::EBX).addReg(Base);
   }
 
@@ -3447,7 +3447,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
     unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
     assert((Subtarget->hasSSE1() || !NumXMMRegs) &&
           "SSE registers cannot be used when SSE is disabled");
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV8ri),
            X86::AL).addImm(NumXMMRegs);
   }
 
@@ -3471,7 +3471,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
   if (CalleeOp) {
     // Register-indirect call.
     unsigned CallOpc = Is64Bit ? X86::CALL64r : X86::CALL32r;
-    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc))
+    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CallOpc))
      .addReg(CalleeOp);
   } else {
     // Direct call.
@@ -3489,7 +3489,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
                      ? (Is64Bit ? X86::CALL64m : X86::CALL32m)
                      : (Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32);
 
-    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc));
+    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CallOpc));
     if (NeedLoad)
       MIB.addReg(Is64Bit ? X86::RIP : 0).addImm(1).addReg(0);
     if (Symbol)
@@ -3522,7 +3522,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
                      ? NumBytes // Callee pops everything.
                      : computeBytesPoppedByCalleeForSRet(Subtarget, CC, CLI.CB);
   unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackUp))
     .addImm(NumBytes).addImm(NumBytesForCalleeToPop);
 
   // Now handle call return values.
@@ -3554,7 +3554,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
     }
 
     // Copy out the result.
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), CopyReg).addReg(SrcReg);
     InRegs.push_back(VA.getLocReg());
 
@@ -3566,11 +3566,11 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
       unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
       unsigned MemSize = ResVT.getSizeInBits()/8;
       int FI = MFI.CreateStackObject(MemSize, Align(MemSize), false);
-      addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                 TII.get(Opc)), FI)
        .addReg(CopyReg);
       Opc = ResVT == MVT::f32 ? X86::MOVSSrm_alt : X86::MOVSDrm_alt;
-      addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                 TII.get(Opc), ResultReg + i), FI);
     }
   }
@@ -3659,7 +3659,7 @@ X86FastISel::fastSelectInstruction(const Instruction *I) {
     // with the wrong VT if we fall out of fast isel after selecting this.
     const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
     Register ResultReg = createResultReg(DstClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);
 
     updateValueMap(I, ResultReg);
@@ -3688,7 +3688,7 @@ unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
     return SrcReg;
   case MVT::i64: {
     Register ResultReg = createResultReg(&X86::GR64RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
       .addImm(0).addReg(SrcReg).addImm(X86::sub_32bit);
     return ResultReg;
@@ -3772,10 +3772,10 @@ unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
   // Large code model only applies to 64-bit mode.
   if (Subtarget->is64Bit() && CM == CodeModel::Large) {
     Register AddrReg = createResultReg(&X86::GR64RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV64ri),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV64ri),
             AddrReg)
       .addConstantPoolIndex(CPI, 0, OpFlag);
-    MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                       TII.get(Opc), ResultReg);
     addRegReg(MIB, AddrReg, false, PICBase, false);
     MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
@@ -3785,7 +3785,7 @@ unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
     return ResultReg;
   }
 
-  addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                    TII.get(Opc), ResultReg),
                            CPI, PICBase, OpFlag);
   return ResultReg;
@@ -3810,7 +3810,7 @@ unsigned X86FastISel::X86MaterializeGV(const GlobalValue *GV, MVT VT) {
       TLI.getPointerTy(DL) == MVT::i64) {
     // The displacement code could be more than 32 bits away so we need to use
     // an instruction with a 64 bit immediate
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV64ri),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV64ri),
             ResultReg)
       .addGlobalAddress(GV);
   } else {
     unsigned Opc =
       TLI.getPointerTy(DL) == MVT::i32
       ? (Subtarget->isTarget64BitILP32() ?
          X86::LEA64_32r : X86::LEA32r) : X86::LEA64r;
-    addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(Opc), ResultReg), AM);
   }
   return ResultReg;
@@ -3860,7 +3860,7 @@ unsigned X86FastISel::fastMaterializeConstant(const Constant *C) {
 
   if (Opc) {
     Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc),
             ResultReg);
     return ResultReg;
  }
@@ -3890,7 +3890,7 @@ unsigned X86FastISel::fastMaterializeAlloca(const AllocaInst *C) {
                  : X86::LEA64r;
   const TargetRegisterClass *RC = TLI.getRegClassFor(TLI.getPointerTy(DL));
   Register ResultReg = createResultReg(RC);
-  addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          TII.get(Opc), ResultReg), AM);
   return ResultReg;
 }
@@ -3926,7 +3926,7 @@ unsigned X86FastISel::fastMaterializeFloatZero(const ConstantFP *CF) {
   }
 
   Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg);
   return ResultReg;
 }
 
@@ -3990,18 +3990,18 @@ unsigned X86FastISel::fastEmitInst_rrrr(unsigned MachineInstOpcode,
   Op3 = constrainOperandRegClass(II, Op3, II.getNumDefs() + 3);
 
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
         .addReg(Op0)
         .addReg(Op1)
         .addReg(Op2)
         .addReg(Op3);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
         .addReg(Op0)
         .addReg(Op1)
         .addReg(Op2)
        .addReg(Op3);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
   }
   return ResultReg;
diff --git a/llvm/utils/TableGen/FastISelEmitter.cpp b/llvm/utils/TableGen/FastISelEmitter.cpp
index 49c2ead..31a588a 100644
--- a/llvm/utils/TableGen/FastISelEmitter.cpp
+++ b/llvm/utils/TableGen/FastISelEmitter.cpp
@@ -655,7 +655,7 @@ void FastISelMap::emitInstructionCode(raw_ostream &OS,
 
   for (unsigned i = 0; i < Memo.PhysRegs.size(); ++i) {
     if (Memo.PhysRegs[i] != "")
-      OS << "  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, "
+      OS << "  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, "
         << "TII.get(TargetOpcode::COPY), " << Memo.PhysRegs[i]
         << ").addReg(Op" << i << ");\n";
   }
-- 
2.7.4