From f1362f61960b36665592f34811540096ddc089f4 Mon Sep 17 00:00:00 2001
From: Benjamin Kramer
Date: Sat, 28 Feb 2015 12:04:00 +0000
Subject: [PATCH] ArrayRefize memory operand folding. NFC.

llvm-svn: 230846
---
 llvm/include/llvm/Target/TargetInstrInfo.h   | 32 +++++++++----------
 llvm/lib/CodeGen/TargetInstrInfo.cpp         | 30 +++++++-----------
 llvm/lib/Target/AArch64/AArch64InstrInfo.cpp |  8 ++---
 llvm/lib/Target/AArch64/AArch64InstrInfo.h   |  7 ++---
 llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp |  7 ++---
 llvm/lib/Target/Hexagon/HexagonInstrInfo.h   | 12 +++----
 llvm/lib/Target/R600/AMDGPUInstrInfo.cpp     | 24 ++++++--------
 llvm/lib/Target/R600/AMDGPUInstrInfo.h       | 13 ++++----
 llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp | 15 +++++----
 llvm/lib/Target/SystemZ/SystemZInstrInfo.h   |  8 ++---
 llvm/lib/Target/X86/X86InstrInfo.cpp         | 47 +++++++++++++---------------
 llvm/lib/Target/X86/X86InstrInfo.h           | 21 ++++++-------
 12 files changed, 99 insertions(+), 125 deletions(-)

diff --git a/llvm/include/llvm/Target/TargetInstrInfo.h b/llvm/include/llvm/Target/TargetInstrInfo.h
index 247f9d8..f067cc0 100644
--- a/llvm/include/llvm/Target/TargetInstrInfo.h
+++ b/llvm/include/llvm/Target/TargetInstrInfo.h
@@ -672,16 +672,15 @@ public:
   /// operand folded, otherwise NULL is returned.
   /// The new instruction is inserted before MI, and the client is responsible
   /// for removing the old instruction.
-  MachineInstr* foldMemoryOperand(MachineBasicBlock::iterator MI,
-                                  const SmallVectorImpl<unsigned> &Ops,
-                                  int FrameIndex) const;
+  MachineInstr *foldMemoryOperand(MachineBasicBlock::iterator MI,
+                                  ArrayRef<unsigned> Ops, int FrameIndex) const;
 
   /// foldMemoryOperand - Same as the previous version except it allows folding
   /// of any load and store from / to any address, not just from a specific
   /// stack slot.
-  MachineInstr* foldMemoryOperand(MachineBasicBlock::iterator MI,
-                                  const SmallVectorImpl<unsigned> &Ops,
-                                  MachineInstr* LoadMI) const;
+  MachineInstr *foldMemoryOperand(MachineBasicBlock::iterator MI,
+                                  ArrayRef<unsigned> Ops,
+                                  MachineInstr *LoadMI) const;
 
   /// hasPattern - return true when there is potentially a faster code sequence
   /// for an instruction chain ending in \p Root. All potential pattern are
@@ -723,20 +722,20 @@ protected:
   /// foldMemoryOperandImpl - Target-dependent implementation for
   /// foldMemoryOperand. Target-independent code in foldMemoryOperand will
   /// take care of adding a MachineMemOperand to the newly created instruction.
-  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
-                                              MachineInstr* MI,
-                                              const SmallVectorImpl<unsigned> &Ops,
-                                              int FrameIndex) const {
+  virtual MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr *MI,
+                                              ArrayRef<unsigned> Ops,
+                                              int FrameIndex) const {
     return nullptr;
   }
 
   /// foldMemoryOperandImpl - Target-dependent implementation for
   /// foldMemoryOperand. Target-independent code in foldMemoryOperand will
   /// take care of adding a MachineMemOperand to the newly created instruction.
-  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
-                                              MachineInstr* MI,
-                                              const SmallVectorImpl<unsigned> &Ops,
-                                              MachineInstr* LoadMI) const {
+  virtual MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr *MI,
+                                              ArrayRef<unsigned> Ops,
+                                              MachineInstr *LoadMI) const {
     return nullptr;
   }
 
@@ -786,9 +785,8 @@ protected:
 public:
   /// canFoldMemoryOperand - Returns true for the specified load / store if
   /// folding is possible.
-  virtual
-  bool canFoldMemoryOperand(const MachineInstr *MI,
-                            const SmallVectorImpl<unsigned> &Ops) const;
+  virtual bool canFoldMemoryOperand(const MachineInstr *MI,
+                                    ArrayRef<unsigned> Ops) const;
 
   /// unfoldMemoryOperand - Separate a single instruction which folded a load or
   /// a store or a load and a store into two or more instruction. If this is
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index 2566c1f..22536e9 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -377,16 +377,13 @@ void TargetInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
   llvm_unreachable("Not a MachO target");
 }
 
-bool TargetInstrInfo::
-canFoldMemoryOperand(const MachineInstr *MI,
-                     const SmallVectorImpl<unsigned> &Ops) const {
+bool TargetInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
+                                           ArrayRef<unsigned> Ops) const {
   return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
 }
 
-static MachineInstr* foldPatchpoint(MachineFunction &MF,
-                                    MachineInstr *MI,
-                                    const SmallVectorImpl<unsigned> &Ops,
-                                    int FrameIndex,
+static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
+                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                     const TargetInstrInfo &TII) {
   unsigned StartIdx = 0;
   switch (MI->getOpcode()) {
@@ -405,9 +402,8 @@ static MachineInstr* foldPatchpoint(MachineFunction &MF,
 
   // Return false if any operands requested for folding are not foldable (not
   // part of the stackmap's live values).
-  for (SmallVectorImpl<unsigned>::const_iterator I = Ops.begin(), E = Ops.end();
-       I != E; ++I) {
-    if (*I < StartIdx)
+  for (unsigned Op : Ops) {
+    if (Op < StartIdx)
       return nullptr;
   }
 
@@ -448,10 +444,9 @@ static MachineInstr* foldPatchpoint(MachineFunction &MF,
 /// operand folded, otherwise NULL is returned. The client is responsible for
 /// removing the old instruction and adding the new one in the instruction
 /// stream.
-MachineInstr*
-TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
-                                   const SmallVectorImpl<unsigned> &Ops,
-                                   int FI) const {
+MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
+                                                 ArrayRef<unsigned> Ops,
+                                                 int FI) const {
   unsigned Flags = 0;
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
     if (MI->getOperand(Ops[i]).isDef())
@@ -517,10 +512,9 @@ TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
 /// foldMemoryOperand - Same as the previous version except it allows folding
 /// of any load and store from / to any address, not just from a specific
 /// stack slot.
-MachineInstr*
-TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
-                                   const SmallVectorImpl<unsigned> &Ops,
-                                   MachineInstr* LoadMI) const {
+MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
+                                                 ArrayRef<unsigned> Ops,
+                                                 MachineInstr *LoadMI) const {
   assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
 #ifndef NDEBUG
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 64cec55..b5870da 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2068,10 +2068,10 @@ void llvm::emitFrameOffset(MachineBasicBlock &MBB,
       .setMIFlag(Flag);
 }
 
-MachineInstr *
-AArch64InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
-                                        const SmallVectorImpl<unsigned> &Ops,
-                                        int FrameIndex) const {
+MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                                      MachineInstr *MI,
+                                                      ArrayRef<unsigned> Ops,
+                                                      int FrameIndex) const {
   // This is a bit of a hack. Consider this instruction:
   //
   //   %vreg0 = COPY %SP; GPR64all:%vreg0
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index d8f1274..fa4b8b7 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -129,10 +129,9 @@ public:
                             const TargetRegisterInfo *TRI) const override;
 
   using TargetInstrInfo::foldMemoryOperandImpl;
-  MachineInstr *
-  foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
-                        const SmallVectorImpl<unsigned> &Ops,
-                        int FrameIndex) const override;
+  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+                                      ArrayRef<unsigned> Ops,
+                                      int FrameIndex) const override;
 
   bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                      MachineBasicBlock *&FBB,
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index 9bae12c..f3aabf8 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -550,11 +550,10 @@ void HexagonInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
   llvm_unreachable("Unimplemented");
 }
 
-
 MachineInstr *HexagonInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
-                                                      MachineInstr* MI,
-                                                      const SmallVectorImpl<unsigned> &Ops,
-                                                      int FI) const {
+                                                      MachineInstr *MI,
+                                                      ArrayRef<unsigned> Ops,
+                                                      int FI) const {
   // Hexagon_TODO: Implement.
   return nullptr;
 }
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.h b/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
index 6acfbec..f60fddf 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -102,15 +102,13 @@ public:
                        const TargetRegisterClass *RC,
                        SmallVectorImpl<MachineInstr *> &NewMIs) const;
 
-  MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
-                                      MachineInstr* MI,
-                                      const SmallVectorImpl<unsigned> &Ops,
+  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+                                      ArrayRef<unsigned> Ops,
                                       int FrameIndex) const override;
 
-  MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
-                                      MachineInstr* MI,
-                                      const SmallVectorImpl<unsigned> &Ops,
-                                      MachineInstr* LoadMI) const override {
+  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+                                      ArrayRef<unsigned> Ops,
+                                      MachineInstr *LoadMI) const override {
     return nullptr;
   }
 
diff --git a/llvm/lib/Target/R600/AMDGPUInstrInfo.cpp b/llvm/lib/Target/R600/AMDGPUInstrInfo.cpp
index f4de2d6..28c1576 100644
--- a/llvm/lib/Target/R600/AMDGPUInstrInfo.cpp
+++ b/llvm/lib/Target/R600/AMDGPUInstrInfo.cpp
@@ -152,26 +152,22 @@ bool AMDGPUInstrInfo::expandPostRAPseudo (MachineBasicBlock::iterator MI) const
   return true;
 }
 
-
-MachineInstr *
-AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
-                                       MachineInstr *MI,
-                                       const SmallVectorImpl<unsigned> &Ops,
-                                       int FrameIndex) const {
+MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                                     MachineInstr *MI,
+                                                     ArrayRef<unsigned> Ops,
+                                                     int FrameIndex) const {
   // TODO: Implement this function
   return nullptr;
 }
-MachineInstr*
-AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
-                                       MachineInstr *MI,
-                                       const SmallVectorImpl<unsigned> &Ops,
-                                       MachineInstr *LoadMI) const {
+MachineInstr *
+AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+                                       ArrayRef<unsigned> Ops,
+                                       MachineInstr *LoadMI) const {
   // TODO: Implement this function
   return nullptr;
 }
-bool
-AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
-                                      const SmallVectorImpl<unsigned> &Ops) const {
+bool AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
+                                           ArrayRef<unsigned> Ops) const {
   // TODO: Implement this function
   return false;
 }
diff --git a/llvm/lib/Target/R600/AMDGPUInstrInfo.h b/llvm/lib/Target/R600/AMDGPUInstrInfo.h
index 202183c..07042b5 100644
--- a/llvm/lib/Target/R600/AMDGPUInstrInfo.h
+++ b/llvm/lib/Target/R600/AMDGPUInstrInfo.h
@@ -85,14 +85,13 @@ public:
                            const TargetRegisterInfo *TRI) const override;
 
 protected:
-  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
-                                      MachineInstr *MI,
-                                      const SmallVectorImpl<unsigned> &Ops,
+  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+                                      ArrayRef<unsigned> Ops,
                                       int FrameIndex) const override;
-  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
-                                      MachineInstr *MI,
-                                      const SmallVectorImpl<unsigned> &Ops,
+  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+                                      ArrayRef<unsigned> Ops,
                                       MachineInstr *LoadMI) const override;
+
 public:
   /// \returns the smallest register index that will be accessed by an indirect
   /// read or write or -1 if indirect addressing is not used by this program.
@@ -103,7 +102,7 @@ public:
   int getIndirectIndexEnd(const MachineFunction &MF) const;
 
   bool canFoldMemoryOperand(const MachineInstr *MI,
-                            const SmallVectorImpl<unsigned> &Ops) const override;
+                            ArrayRef<unsigned> Ops) const override;
   bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                            unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
                            SmallVectorImpl<MachineInstr *> &NewMIs) const override;
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index 8488ec8..5128993 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -743,11 +743,10 @@ SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
   return nullptr;
 }
 
-MachineInstr *
-SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
-                                        MachineInstr *MI,
-                                        const SmallVectorImpl<unsigned> &Ops,
-                                        int FrameIndex) const {
+MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                                      MachineInstr *MI,
+                                                      ArrayRef<unsigned> Ops,
+                                                      int FrameIndex) const {
   const MachineFrameInfo *MFI = MF.getFrameInfo();
   unsigned Size = MFI->getObjectSize(FrameIndex);
   unsigned Opcode = MI->getOpcode();
@@ -862,9 +861,9 @@ SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
 }
 
 MachineInstr *
-SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr* MI,
-                                        const SmallVectorImpl<unsigned> &Ops,
-                                        MachineInstr* LoadMI) const {
+SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+                                        ArrayRef<unsigned> Ops,
+                                        MachineInstr *LoadMI) const {
   return nullptr;
 }
 
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
index e711f89..b55810b 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -186,11 +186,11 @@ public:
                                       MachineBasicBlock::iterator &MBBI,
                                       LiveVariables *LV) const override;
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
-                                      const SmallVectorImpl<unsigned> &Ops,
+                                      ArrayRef<unsigned> Ops,
                                       int FrameIndex) const override;
-  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr* MI,
-                                      const SmallVectorImpl<unsigned> &Ops,
-                                      MachineInstr* LoadMI) const override;
+  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+                                      ArrayRef<unsigned> Ops,
+                                      MachineInstr *LoadMI) const override;
   bool expandPostRAPseudo(MachineBasicBlock::iterator MBBI) const override;
   bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
 
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index ea4e251..36cbe08 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -4573,9 +4573,7 @@ MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr *MI,
     return nullptr;
 
   // Check whether we can fold the def into SrcOperandId.
-  SmallVector<unsigned, 2> Ops;
-  Ops.push_back(SrcOperandId);
-  MachineInstr *FoldMI = foldMemoryOperand(MI, Ops, DefMI);
+  MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandId, DefMI);
   if (FoldMI) {
     FoldAsLoadDefReg = 0;
     return FoldMI;
@@ -4670,7 +4668,7 @@ bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
 }
 
 static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
-                                     const SmallVectorImpl<MachineOperand> &MOs,
+                                     ArrayRef<MachineOperand> MOs,
                                      MachineInstr *MI,
                                      const TargetInstrInfo &TII) {
   // Create the base instruction with the memory operand as the first part.
@@ -4697,9 +4695,8 @@ static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
   return MIB;
 }
 
-static MachineInstr *FuseInst(MachineFunction &MF,
-                              unsigned Opcode, unsigned OpNo,
-                              const SmallVectorImpl<MachineOperand> &MOs,
+static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode,
+                              unsigned OpNo, ArrayRef<MachineOperand> MOs,
                               MachineInstr *MI, const TargetInstrInfo &TII) {
   // Omit the implicit operands, something BuildMI can't do.
   MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
@@ -4723,7 +4720,7 @@ static MachineInstr *FuseInst(MachineFunction &MF,
 }
 
 static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
-                                const SmallVectorImpl<MachineOperand> &MOs,
+                                ArrayRef<MachineOperand> MOs,
                                 MachineInstr *MI) {
   MachineFunction &MF = *MI->getParent()->getParent();
   MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), TII.get(Opcode));
@@ -4736,12 +4733,12 @@ static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
   return MIB.addImm(0);
 }
 
-MachineInstr*
-X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
-                                    MachineInstr *MI, unsigned OpNum,
-                                    const SmallVectorImpl<MachineOperand> &MOs,
-                                    unsigned Size, unsigned Align,
-                                    bool AllowCommute) const {
+MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                                  MachineInstr *MI,
+                                                  unsigned OpNum,
+                                                  ArrayRef<MachineOperand> MOs,
+                                                  unsigned Size, unsigned Align,
+                                                  bool AllowCommute) const {
   const DenseMap<unsigned,
                  std::pair<unsigned, unsigned> > *OpcodeTablePtr = nullptr;
   bool isCallRegIndirect = Subtarget.callRegIndirect();
@@ -5104,10 +5101,10 @@ breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
     MI->addRegisterKilled(Reg, TRI, true);
 }
 
-MachineInstr*
-X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
-                                    const SmallVectorImpl<unsigned> &Ops,
-                                    int FrameIndex) const {
+MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                                  MachineInstr *MI,
+                                                  ArrayRef<unsigned> Ops,
+                                                  int FrameIndex) const {
   // Check switch flag
   if (NoFusing) return nullptr;
 
@@ -5145,10 +5142,9 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
   } else if (Ops.size() != 1)
     return nullptr;
 
-  SmallVector<MachineOperand, 4> MOs;
-  MOs.push_back(MachineOperand::CreateFI(FrameIndex));
-  return foldMemoryOperandImpl(MF, MI, Ops[0], MOs,
-                               Size, Alignment, /*AllowCommute=*/true);
+  return foldMemoryOperandImpl(MF, MI, Ops[0],
+                               MachineOperand::CreateFI(FrameIndex), Size,
+                               Alignment, /*AllowCommute=*/true);
 }
 
 static bool isPartialRegisterLoad(const MachineInstr &LoadMI,
@@ -5170,9 +5166,9 @@ static bool isPartialRegisterLoad(const MachineInstr &LoadMI,
   return false;
 }
 
-MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                                   MachineInstr *MI,
-                                  const SmallVectorImpl<unsigned> &Ops,
+                                                  ArrayRef<unsigned> Ops,
                                                   MachineInstr *LoadMI) const {
   // If loading from a FrameIndex, fold directly from the FrameIndex.
   unsigned NumOps = LoadMI->getDesc().getNumOperands();
@@ -5304,9 +5300,8 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                /*Size=*/0, Alignment, /*AllowCommute=*/true);
 }
 
-
 bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
-                                const SmallVectorImpl<unsigned> &Ops) const {
+                                        ArrayRef<unsigned> Ops) const {
   // Check switch flag
   if (NoFusing) return 0;
 
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index 4d15467..0dd8101 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -305,23 +305,21 @@ public:
   /// folding and return true, otherwise it should return false. If it folds
   /// the instruction, it is likely that the MachineInstruction the iterator
   /// references has been changed.
-  MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
-                                      MachineInstr* MI,
-                                      const SmallVectorImpl<unsigned> &Ops,
+  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+                                      ArrayRef<unsigned> Ops,
                                       int FrameIndex) const override;
 
   /// foldMemoryOperand - Same as the previous version except it allows folding
   /// of any load and store from / to any address, not just from a specific
   /// stack slot.
-  MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
-                                      MachineInstr* MI,
-                                      const SmallVectorImpl<unsigned> &Ops,
-                                      MachineInstr* LoadMI) const override;
+  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+                                      ArrayRef<unsigned> Ops,
+                                      MachineInstr *LoadMI) const override;
 
   /// canFoldMemoryOperand - Returns true if the specified load / store is
   /// folding is possible.
-  bool canFoldMemoryOperand(const MachineInstr*,
-                            const SmallVectorImpl<unsigned> &) const override;
+  bool canFoldMemoryOperand(const MachineInstr *,
+                            ArrayRef<unsigned>) const override;
 
   /// unfoldMemoryOperand - Separate a single instruction which folded a load or
   /// a store or a load and a store into two or more instruction. If this is
@@ -406,10 +404,9 @@ public:
   void breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
                                  const TargetRegisterInfo *TRI) const override;
 
-  MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
-                                      MachineInstr* MI,
+  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       unsigned OpNum,
-                                      const SmallVectorImpl<MachineOperand> &MOs,
+                                      ArrayRef<MachineOperand> MOs,
                                       unsigned Size, unsigned Alignment,
                                       bool AllowCommute) const;
-- 
2.7.4
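Note on why the interface change is NFC at call sites: ArrayRef<unsigned> binds implicitly to everything the old const SmallVectorImpl<unsigned>& callers already pass, and additionally to a single index or a braced list, which is what lets the X86 call sites above drop their temporary SmallVector/MOs vectors. The standalone sketch below illustrates those conversions under stated assumptions: sumOps and demo are made-up names for illustration only; llvm::ArrayRef and llvm::SmallVector are the real ADT types.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"

// Hypothetical callee written in the post-patch parameter style.
static unsigned sumOps(llvm::ArrayRef<unsigned> Ops) {
  unsigned Sum = 0;
  for (unsigned Op : Ops) // iterates exactly like the old SmallVectorImpl
    Sum += Op;
  return Sum;
}

unsigned demo() {
  // A single operand index converts implicitly -- the same conversion that
  // lets X86InstrInfo::optimizeLoadInstr pass SrcOperandId directly instead
  // of building a one-element SmallVector first.
  unsigned A = sumOps(2u);

  // Existing SmallVector-based callers keep compiling unchanged.
  llvm::SmallVector<unsigned, 2> Ops;
  Ops.push_back(0);
  Ops.push_back(3);
  unsigned B = sumOps(Ops);

  // Braced lists convert as well, which is convenient in tests.
  unsigned C = sumOps({0, 1, 2});

  return A + B + C;
}

One caveat of the single-element and initializer-list constructors: the ArrayRef points at the caller's argument, so it must not outlive the full expression of the call. That is fine for the pass-through parameter style used throughout this patch, but such an ArrayRef should not be stored.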