/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the last new instruction.
///
- virtual MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineInstr &MI,
+ virtual MachineInstr *convertToThreeAddress(MachineInstr &MI,
LiveVariables *LV) const {
return nullptr;
}
bool TwoAddressInstructionPass::convertInstTo3Addr(
MachineBasicBlock::iterator &mi, MachineBasicBlock::iterator &nmi,
Register RegA, Register RegB, unsigned Dist) {
- // FIXME: Why does convertToThreeAddress() need an iterator reference?
- MachineFunction::iterator MFI = MBB->getIterator();
- MachineInstr *NewMI = TII->convertToThreeAddress(MFI, *mi, LV);
- assert(MBB->getIterator() == MFI &&
- "convertToThreeAddress changed iterator reference");
+ MachineInstr *NewMI = TII->convertToThreeAddress(*mi, LV);
if (!NewMI)
return false;
// Use of output modifiers forces VOP3 encoding for a VOP2 mac/fmac
// instruction, so we might as well convert it to the more flexible VOP3-only
// mad/fma form.
- MachineFunction::iterator MBBI = Def->getParent()->getIterator();
- if (TII->convertToThreeAddress(MBBI, *Def, nullptr))
+ if (TII->convertToThreeAddress(*Def, nullptr))
Def->eraseFromParent();
return true;
// Use of output modifiers forces VOP3 encoding for a VOP2 mac/fmac
// instruction, so we might as well convert it to the more flexible VOP3-only
// mad/fma form.
- MachineFunction::iterator MBBI = Def->getParent()->getIterator();
- if (TII->convertToThreeAddress(MBBI, *Def, nullptr))
+ if (TII->convertToThreeAddress(*Def, nullptr))
Def->eraseFromParent();
return true;
}
}
-MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB,
- MachineInstr &MI,
+MachineInstr *SIInstrInfo::convertToThreeAddress(MachineInstr &MI,
LiveVariables *LV) const {
unsigned Opc = MI.getOpcode();
bool IsF16 = false;
const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod);
MachineInstrBuilder MIB;
+ MachineBasicBlock &MBB = *MI.getParent();
if (!Src0Mods && !Src1Mods && !Clamp && !Omod && !IsF64 &&
// If we have an SGPR input, we will violate the constant bus restriction.
(ST.getConstantBusLimit(Opc) > 1 || !Src0->isReg() ||
- !RI.isSGPRReg(MBB->getParent()->getRegInfo(), Src0->getReg()))) {
+ !RI.isSGPRReg(MBB.getParent()->getRegInfo(), Src0->getReg()))) {
int64_t Imm;
if (getFoldableImm(Src2, Imm)) {
unsigned NewOpc =
IsFMA ? (IsF16 ? AMDGPU::V_FMAAK_F16 : AMDGPU::V_FMAAK_F32)
: (IsF16 ? AMDGPU::V_MADAK_F16 : AMDGPU::V_MADAK_F32);
if (pseudoToMCOpcode(NewOpc) != -1) {
- MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc))
+ MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
.add(*Dst)
.add(*Src0)
.add(*Src1)
: (IsF16 ? AMDGPU::V_MADMK_F16 : AMDGPU::V_MADMK_F32);
if (getFoldableImm(Src1, Imm)) {
if (pseudoToMCOpcode(NewOpc) != -1) {
- MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc))
+ MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
.add(*Dst)
.add(*Src0)
.addImm(Imm)
isOperandLegal(
MI, AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::src0),
Src1)) {
- MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc))
+ MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
.add(*Dst)
.add(*Src1)
.addImm(Imm)
if (pseudoToMCOpcode(NewOpc) == -1)
return nullptr;
- MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc))
+ MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
.add(*Dst)
.addImm(Src0Mods ? Src0Mods->getImm() : 0)
.add(*Src0)
unsigned getMachineCSELookAheadLimit() const override { return 500; }
- MachineInstr *convertToThreeAddress(MachineFunction::iterator &MBB,
- MachineInstr &MI,
+ MachineInstr *convertToThreeAddress(MachineInstr &MI,
LiveVariables *LV) const override;
bool isSchedulingBoundary(const MachineInstr &MI,
return MHR;
}
-MachineInstr *ARMBaseInstrInfo::convertToThreeAddress(
- MachineFunction::iterator &MFI, MachineInstr &MI, LiveVariables *LV) const {
+MachineInstr *ARMBaseInstrInfo::convertToThreeAddress(MachineInstr &MI,
+ LiveVariables *LV) const {
// FIXME: Thumb2 support.
if (!EnableARM3Addr)
}
}
- MachineBasicBlock::iterator MBBI = MI.getIterator();
- MFI->insert(MBBI, NewMIs[1]);
- MFI->insert(MBBI, NewMIs[0]);
+ MachineBasicBlock &MBB = *MI.getParent();
+ MBB.insert(MI, NewMIs[1]);
+ MBB.insert(MI, NewMIs[0]);
return NewMIs[0];
}
// if there is not such an opcode.
virtual unsigned getUnindexedOpcode(unsigned Opc) const = 0;
- MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineInstr &MI,
+ MachineInstr *convertToThreeAddress(MachineInstr &MI,
LiveVariables *LV) const override;
virtual const ARMBaseRegisterInfo &getRegisterInfo() const = 0;
CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
-MachineInstr *RISCVInstrInfo::convertToThreeAddress(
- MachineFunction::iterator &MBB, MachineInstr &MI, LiveVariables *LV) const {
+MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
+ LiveVariables *LV) const {
switch (MI.getOpcode()) {
default:
break;
}
    // clang-format on
- MachineInstrBuilder MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc))
+ MachineBasicBlock &MBB = *MI.getParent();
+ MachineInstrBuilder MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
.add(MI.getOperand(0))
.add(MI.getOperand(1))
.add(MI.getOperand(2))
unsigned OpIdx1,
unsigned OpIdx2) const override;
- MachineInstr *convertToThreeAddress(MachineFunction::iterator &MBB,
- MachineInstr &MI,
+ MachineInstr *convertToThreeAddress(MachineInstr &MI,
LiveVariables *LV) const override;
Register getVLENFactoredAmount(
NewMI->setFlag(Flag);
}
-MachineInstr *SystemZInstrInfo::convertToThreeAddress(
- MachineFunction::iterator &MFI, MachineInstr &MI, LiveVariables *LV) const {
+MachineInstr *SystemZInstrInfo::convertToThreeAddress(MachineInstr &MI,
+ LiveVariables *LV) const {
MachineBasicBlock *MBB = MI.getParent();
// Try to convert an AND into an RISBG-type instruction.
Register DestReg, int FrameIdx,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const override;
- MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineInstr &MI,
+ MachineInstr *convertToThreeAddress(MachineInstr &MI,
LiveVariables *LV) const override;
MachineInstr *
foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
// These instructions are all fine to convert.
break;
}
- MachineFunction::iterator MFI = MBB.getIterator();
- return TII->convertToThreeAddress(MFI, MI, nullptr);
+ return TII->convertToThreeAddress(MI, nullptr);
}
FunctionPass *llvm::createX86FixupLEAs() { return new FixupLEAPass(); }
return true;
}
-MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(
- unsigned MIOpc, MachineFunction::iterator &MFI, MachineInstr &MI,
- LiveVariables *LV, bool Is8BitOp) const {
+MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
+ MachineInstr &MI,
+ LiveVariables *LV,
+ bool Is8BitOp) const {
// We handle 8-bit adds and various 16-bit opcodes in the switch below.
- MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
+ MachineBasicBlock &MBB = *MI.getParent();
+ MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
assert((Is8BitOp || RegInfo.getTargetRegisterInfo()->getRegSizeInBits(
*RegInfo.getRegClass(MI.getOperand(0).getReg())) == 16) &&
"Unexpected type for LEA transform");
bool IsKill = MI.getOperand(1).isKill();
unsigned SubReg = Is8BitOp ? X86::sub_8bit : X86::sub_16bit;
assert(!MI.getOperand(1).isUndef() && "Undef op doesn't need optimization");
- BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), InRegLEA);
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), InRegLEA);
MachineInstr *InsMI =
- BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
.addReg(InRegLEA, RegState::Define, SubReg)
.addReg(Src, getKillRegState(IsKill));
MachineInstrBuilder MIB =
- BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(Opcode), OutRegLEA);
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), get(Opcode), OutRegLEA);
switch (MIOpc) {
default: llvm_unreachable("Unreachable!");
case X86::SHL8ri:
InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
// Build and insert into an implicit UNDEF value. This is OK because
// we will be shifting and then extracting the lower 8/16-bits.
- BuildMI(*MFI, &*MIB, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), InRegLEA2);
- InsMI2 = BuildMI(*MFI, &*MIB, MI.getDebugLoc(), get(TargetOpcode::COPY))
+ BuildMI(MBB, &*MIB, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), InRegLEA2);
+ InsMI2 = BuildMI(MBB, &*MIB, MI.getDebugLoc(), get(TargetOpcode::COPY))
.addReg(InRegLEA2, RegState::Define, SubReg)
.addReg(Src2, getKillRegState(IsKill2));
addRegReg(MIB, InRegLEA, true, InRegLEA2, true);
MachineInstr *NewMI = MIB;
MachineInstr *ExtMI =
- BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
.addReg(Dest, RegState::Define | getDeadRegState(IsDead))
.addReg(OutRegLEA, RegState::Kill, SubReg);
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the new instruction.
///
-MachineInstr *
-X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineInstr &MI, LiveVariables *LV) const {
+MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr &MI,
+ LiveVariables *LV) const {
// The following opcodes also sets the condition code register(s). Only
// convert them to equivalent lea if the condition code register def's
// are dead!
unsigned ShAmt = getTruncatedShiftCount(MI, 2);
if (!isTruncatedShiftCountForLEA(ShAmt))
return nullptr;
- return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
+ return convertToThreeAddressWithLEA(MIOpc, MI, LV, Is8BitOp);
}
case X86::INC64r:
case X86::INC32r: {
LLVM_FALLTHROUGH;
case X86::DEC16r:
case X86::INC16r:
- return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
+ return convertToThreeAddressWithLEA(MIOpc, MI, LV, Is8BitOp);
case X86::ADD64rr:
case X86::ADD64rr_DB:
case X86::ADD32rr:
LLVM_FALLTHROUGH;
case X86::ADD16rr:
case X86::ADD16rr_DB:
- return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
+ return convertToThreeAddressWithLEA(MIOpc, MI, LV, Is8BitOp);
case X86::ADD64ri32:
case X86::ADD64ri8:
case X86::ADD64ri32_DB:
case X86::ADD16ri8:
case X86::ADD16ri_DB:
case X86::ADD16ri8_DB:
- return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
+ return convertToThreeAddressWithLEA(MIOpc, MI, LV, Is8BitOp);
case X86::SUB8ri:
case X86::SUB16ri8:
case X86::SUB16ri:
LV->replaceKillInstruction(Dest.getReg(), MI, *NewMI);
}
- MFI->insert(MI.getIterator(), NewMI); // Insert the new inst
+ MachineBasicBlock &MBB = *MI.getParent();
+ MBB.insert(MI.getIterator(), NewMI); // Insert the new inst
return NewMI;
}
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the new instruction.
///
- MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineInstr &MI,
+ MachineInstr *convertToThreeAddress(MachineInstr &MI,
LiveVariables *LV) const override;
/// Returns true iff the routine could find two commutable operands in the
/// We use 32-bit LEA to form 3-address code by promoting to a 32-bit
/// super-register and then truncating back down to a 8/16-bit sub-register.
MachineInstr *convertToThreeAddressWithLEA(unsigned MIOpc,
- MachineFunction::iterator &MFI,
MachineInstr &MI,
LiveVariables *LV,
bool Is8BitOp) const;