From: Jay Foad
Date: Fri, 3 Feb 2023 10:04:28 +0000 (+0000)
Subject: [CodeGen] Make more use of MachineOperand::getOperandNo. NFC.
X-Git-Tag: upstream/17.0.6~18338
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=a07584d57d7312d6bd5f266dc8cea915fb2b434b;p=platform%2Fupstream%2Fllvm.git

[CodeGen] Make more use of MachineOperand::getOperandNo. NFC.

Differential Revision: https://reviews.llvm.org/D143252
---

diff --git a/llvm/lib/Target/AArch64/AArch64CollectLOH.cpp b/llvm/lib/Target/AArch64/AArch64CollectLOH.cpp
index d126899..c73b33a 100644
--- a/llvm/lib/Target/AArch64/AArch64CollectLOH.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CollectLOH.cpp
@@ -212,7 +212,7 @@ static bool isCandidateStore(const MachineInstr &MI, const MachineOperand &MO) {
     // In case we have str xA, [xA, #imm], this is two different uses
     // of xA and we cannot fold, otherwise the xA stored may be wrong,
    // even if #imm == 0.
-    return MI.getOperandNo(&MO) == 1 &&
+    return MO.getOperandNo() == 1 &&
            MI.getOperand(0).getReg() != MI.getOperand(1).getReg();
   }
 }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInsertDelayAlu.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInsertDelayAlu.cpp
index c9cdbc8..29d6dfe 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInsertDelayAlu.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInsertDelayAlu.cpp
@@ -380,7 +380,7 @@ public:
       // TODO: Scan implicit defs too?
       for (const auto &Op : MI.defs()) {
         unsigned Latency = SchedModel.computeOperandLatency(
-            &MI, MI.getOperandNo(&Op), nullptr, 0);
+            &MI, Op.getOperandNo(), nullptr, 0);
         for (MCRegUnitIterator UI(Op.getReg(), TRI); UI.isValid(); ++UI)
           State[*UI] = DelayInfo(Type, Latency);
       }
diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
index 0cb5756..6c74054 100644
--- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
@@ -2026,7 +2026,7 @@ int GCNHazardRecognizer::checkMAIHazards908(MachineInstr *MI) {
                                                    MaxWaitStates);
     int NeedWaitStates = MFMAWritesAGPROverlappedSrcABWaitStates;
     int SrcCIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
-    int OpNo = MI->getOperandNo(&Op);
+    int OpNo = Op.getOperandNo();
     if (OpNo == SrcCIdx) {
       NeedWaitStates = MFMAWritesAGPROverlappedSrcCWaitStates;
     } else if (Opc == AMDGPU::V_ACCVGPR_READ_B32_e64) {
@@ -2205,7 +2205,7 @@ int GCNHazardRecognizer::checkMAIHazards90A(MachineInstr *MI) {
     if (NumWaitStates == std::numeric_limits<int>::max())
       continue;
 
-    int OpNo = MI->getOperandNo(&Use);
+    int OpNo = Use.getOperandNo();
     unsigned Opc1 = MI1->getOpcode();
     int NeedWaitStates = 0;
     if (OpNo == SrcCIdx) {
diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index e5a0288..74ede65 100644
--- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -231,7 +231,7 @@ static bool tryChangeVGPRtoSGPRinCopy(MachineInstr &MI,
         UseMI->getOpcode() <= TargetOpcode::GENERIC_OP_END)
       return false;
 
-    unsigned OpIdx = UseMI->getOperandNo(&MO);
+    unsigned OpIdx = MO.getOperandNo();
     if (OpIdx >= UseMI->getDesc().getNumOperands() ||
         !TII->isOperandLegal(*UseMI, OpIdx, &Src))
       return false;
@@ -658,7 +658,7 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
                 TRI->getEquivalentSGPRClass(SrcRC);
             Register NewDst = MRI->createVirtualRegister(DestRC);
             MachineBasicBlock *BlockToInsertCopy =
-                MI.isPHI() ? MI.getOperand(MI.getOperandNo(&MO) + 1).getMBB()
+                MI.isPHI() ? MI.getOperand(MO.getOperandNo() + 1).getMBB()
                            : MBB;
             MachineBasicBlock::iterator PointToInsertCopy =
                 MI.isPHI() ? BlockToInsertCopy->getFirstInstrTerminator() : I;
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 025faec..8de8d45 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -841,7 +841,7 @@ public:
                         const MachineOperand &UseMO,
                         const MachineOperand &DefMO) const {
     assert(UseMO.getParent() == &MI);
-    int OpIdx = MI.getOperandNo(&UseMO);
+    int OpIdx = UseMO.getOperandNo();
     if (OpIdx >= MI.getDesc().NumOperands)
       return false;
 
@@ -873,8 +873,7 @@ public:
   }
 
   bool isInlineConstant(const MachineOperand &MO) const {
-    const MachineInstr *Parent = MO.getParent();
-    return isInlineConstant(*Parent, Parent->getOperandNo(&MO));
+    return isInlineConstant(*MO.getParent(), MO.getOperandNo());
   }
 
   bool isImmOperandLegal(const MachineInstr &MI, unsigned OpNo,
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index c21ff06..1b6d050 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -1158,7 +1158,7 @@ void SIPeepholeSDWA::legalizeScalarOperands(MachineInstr &MI,
     if (!Op.isImm() && !(Op.isReg() && !TRI->isVGPR(*MRI, Op.getReg())))
      continue;
 
-    unsigned I = MI.getOperandNo(&Op);
+    unsigned I = Op.getOperandNo();
     if (Desc.operands()[I].RegClass == -1 ||
         !TRI->isVSSuperClass(TRI->getRegClass(Desc.operands()[I].RegClass)))
       continue;
diff --git a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
index ad65002..7792355 100644
--- a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -161,14 +161,12 @@ bool SIShrinkInstructions::shouldShrinkTrue16(MachineInstr &MI) const {
 
 bool SIShrinkInstructions::isKImmOperand(const MachineOperand &Src) const {
   return isInt<16>(Src.getImm()) &&
-         !TII->isInlineConstant(*Src.getParent(),
-                                Src.getParent()->getOperandNo(&Src));
+         !TII->isInlineConstant(*Src.getParent(), Src.getOperandNo());
 }
 
 bool SIShrinkInstructions::isKUImmOperand(const MachineOperand &Src) const {
   return isUInt<16>(Src.getImm()) &&
-         !TII->isInlineConstant(*Src.getParent(),
-                                Src.getParent()->getOperandNo(&Src));
+         !TII->isInlineConstant(*Src.getParent(), Src.getOperandNo());
 }
 
 bool SIShrinkInstructions::isKImmOrKUImmOperand(const MachineOperand &Src,
diff --git a/llvm/lib/Target/ARC/ARCOptAddrMode.cpp b/llvm/lib/Target/ARC/ARCOptAddrMode.cpp
index 358f903..e7a0b35 100644
--- a/llvm/lib/Target/ARC/ARCOptAddrMode.cpp
+++ b/llvm/lib/Target/ARC/ARCOptAddrMode.cpp
@@ -153,11 +153,10 @@ static bool dominatesAllUsesOf(const MachineInstr *MI, unsigned VReg,
 
   assert(Register::isVirtualRegister(VReg) && "Expected virtual register!");
 
-  for (auto it = MRI->use_nodbg_begin(VReg), end = MRI->use_nodbg_end();
-       it != end; ++it) {
-    MachineInstr *User = it->getParent();
+  for (const MachineOperand &Use : MRI->use_nodbg_operands(VReg)) {
+    const MachineInstr *User = Use.getParent();
     if (User->isPHI()) {
-      unsigned BBOperandIdx = User->getOperandNo(&*it) + 1;
+      unsigned BBOperandIdx = Use.getOperandNo() + 1;
       MachineBasicBlock *MBB = User->getOperand(BBOperandIdx).getMBB();
       if (MBB->empty()) {
         const MachineBasicBlock *InstBB = MI->getParent();
diff --git a/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp b/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
index bd07cb5..c8b156a6 100644
--- a/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
+++ b/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
@@ -907,7 +907,7 @@ static bool producesFalseLanesZero(MachineInstr &MI,
       continue;
     // Skip the lr predicate reg
     int PIdx = llvm::findFirstVPTPredOperandIdx(MI);
-    if (PIdx != -1 && (int)MI.getOperandNo(&MO) == PIdx + 2)
+    if (PIdx != -1 && (int)MO.getOperandNo() == PIdx + 2)
      continue;
 
     // Check that this instruction will produce zeros in its false lanes:
diff --git a/llvm/lib/Target/M68k/M68kInstrInfo.cpp b/llvm/lib/Target/M68k/M68kInstrInfo.cpp
index dc394f5..15b97ba 100644
--- a/llvm/lib/Target/M68k/M68kInstrInfo.cpp
+++ b/llvm/lib/Target/M68k/M68kInstrInfo.cpp
@@ -609,7 +609,7 @@ bool M68kInstrInfo::isPCRelRegisterOperandLegal(
   const MachineInstr *MI = MO.getParent();
   const unsigned NameIndices = M68kInstrNameIndices[MI->getOpcode()];
   StringRef InstrName(&M68kInstrNameData[NameIndices]);
-  const unsigned OperandNo = MI->getOperandNo(&MO);
+  const unsigned OperandNo = MO.getOperandNo();
 
   // If this machine operand is the 2nd operand, then check
   // whether the instruction has destination addressing mode 'k'.
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 75311e3..a343fa1 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -2563,7 +2563,7 @@ bool RISCVInstrInfo::hasAllNBitUsers(const MachineInstr &OrigMI,
 
   for (auto &UserOp : MRI.use_operands(MI->getOperand(0).getReg())) {
     const MachineInstr *UserMI = UserOp.getParent();
-    unsigned OpIdx = UserMI->getOperandNo(&UserOp);
+    unsigned OpIdx = UserOp.getOperandNo();
 
     switch (UserMI->getOpcode()) {
     default:
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 927845aa..c8acb43 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -759,7 +759,7 @@ bool RISCVRegisterInfo::getRegAllocationHints(
 
   for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
     const MachineInstr &MI = *MO.getParent();
-    unsigned OpIdx = MI.getOperandNo(&MO);
+    unsigned OpIdx = MO.getOperandNo();
     bool NeedGPRC;
     if (isCompressible(MI, NeedGPRC)) {
       if (OpIdx == 0 && MI.getOperand(1).isReg()) {
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
index eeec0fc..08483b7 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
@@ -352,7 +352,7 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) {
           unsigned LocalId = getLocalId(Reg2Local, MFI, CurLocal, OldReg);
           // If this register operand is tied to another operand, we can't
           // change it to an immediate. Untie it first.
-          MI.untieRegOperand(MI.getOperandNo(&MO));
+          MI.untieRegOperand(MO.getOperandNo());
           MO.ChangeToImmediate(LocalId);
           continue;
         }
@@ -369,7 +369,7 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) {
         if (MI.isInlineAsm()) {
           unsigned LocalId = getLocalId(Reg2Local, MFI, CurLocal, OldReg);
           // Untie it first if this reg operand is tied to another operand.
-          MI.untieRegOperand(MI.getOperandNo(&MO));
+          MI.untieRegOperand(MO.getOperandNo());
           MO.ChangeToImmediate(LocalId);
           continue;
         }
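
For context, every hunk above is the same mechanical swap: MI.getOperandNo(&MO) becomes MO.getOperandNo(), so the caller no longer needs a separate handle on the parent instruction. The sketch below is a minimal, self-contained illustration using simplified stand-in classes (these are NOT LLVM's real MachineInstr/MachineOperand); it only shows why the two spellings are interchangeable: an operand's number is its distance from the start of its parent instruction's operand array.

// Simplified stand-ins, not LLVM's real classes: just enough structure to
// show the relationship between the two getOperandNo spellings.
#include <cassert>
#include <vector>

struct MachineInstr;

struct MachineOperand {
  MachineInstr *Parent = nullptr;
  MachineInstr *getParent() const { return Parent; }
  unsigned getOperandNo() const; // defined after MachineInstr below
};

struct MachineInstr {
  std::vector<MachineOperand> Operands;

  // An operand's number is its offset from the start of the operand list.
  unsigned getOperandNo(const MachineOperand *MO) const {
    return static_cast<unsigned>(MO - Operands.data());
  }
  MachineOperand &getOperand(unsigned Idx) { return Operands[Idx]; }
};

// The operand-side accessor just asks its parent instruction.
unsigned MachineOperand::getOperandNo() const {
  return Parent->getOperandNo(this);
}

int main() {
  MachineInstr MI;
  MI.Operands.resize(3);
  for (MachineOperand &Op : MI.Operands)
    Op.Parent = &MI;

  const MachineOperand &MO = MI.getOperand(1);
  // Old and new spellings agree; the new one needs no MI variable in scope.
  assert(MI.getOperandNo(&MO) == 1);
  assert(MO.getOperandNo() == 1);
}

In LLVM itself the new MachineOperand::getOperandNo() accessor likewise just asks its parent instruction for the index, which is why this commit is tagged NFC (no functional change): only the spelling at each call site is shortened.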