/// and may be desirable to set to a state that is particularly natural to
/// default construct.
///
+/// Having a supported zero-valued tag also enables getting the address of a
+/// pointer stored with that tag provided it is stored in its natural bit
+/// representation. This works because in the case of a zero-valued tag, the
+/// pointer's value is directly stored into this object and we can expose the
+/// address of that internal storage. This is especially useful when building an
+/// `ArrayRef` of a single pointer stored in a sum type.
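+///
+/// For example (a minimal sketch with illustrative tag and pointer types):
+/// \code
+///   enum Kinds { K_Int = 0, K_Float };
+///   PointerSumType<Kinds, PointerSumTypeMember<K_Int, int *>,
+///                  PointerSumTypeMember<K_Float, float *>>
+///       Sum;
+///   Sum.set<K_Int>(&SomeInt);
+///   // A one-element view over the inline storage; no separate array needed.
+///   ArrayRef<int *> Range(Sum.getAddrOfZeroTagPointer(), 1);
+/// \endcode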
+///
/// There is no support for constructing or accessing with a dynamic tag as
/// that would fundamentally violate the type safety provided by the sum type.
template <typename TagT, typename... MemberTs> class PointerSumType {
- uintptr_t Value = 0;
-
using HelperT = detail::PointerSumTypeHelper<TagT, MemberTs...>;
+ // We keep both the raw value and the min tag value's pointer in a union. When
+ // the minimum tag value is zero, this allows code below to cleanly expose the
+ // address of the zero-tag pointer instead of just the zero-tag pointer
+ // itself. This is especially useful when building `ArrayRef`s out of a single
+ // pointer. However, we have to carefully access the union due to the active
+ // member potentially changing. When we *store* a new value, we directly
+ // access the union to allow us to store using the obvious types. However,
+ // when we *read* a value, we copy the underlying storage out to avoid relying
+ // on one member or the other being active.
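+  // Concretely, `set()` below writes `Storage.Value` directly, while
+  // `getOpaqueValue()` reads the bytes back out with `memcpy`.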
+ union StorageT {
+ // Ensure we get a null default constructed value.
+ uintptr_t Value = 0;
+
+ typename HelperT::template Lookup<HelperT::MinTag>::PointerT MinTagPointer;
+ };
+
+ StorageT Storage;
+
public:
constexpr PointerSumType() = default;
+ /// A typed setter to a given tagged member of the sum type.
+ template <TagT N>
+ void set(typename HelperT::template Lookup<N>::PointerT Pointer) {
+ void *V = HelperT::template Lookup<N>::TraitsT::getAsVoidPointer(Pointer);
+ assert((reinterpret_cast<uintptr_t>(V) & HelperT::TagMask) == 0 &&
+ "Pointer is insufficiently aligned to store the discriminant!");
+ Storage.Value = reinterpret_cast<uintptr_t>(V) | N;
+ }
+
/// A typed constructor for a specific tagged member of the sum type.
template <TagT N>
static PointerSumType
create(typename HelperT::template Lookup<N>::PointerT Pointer) {
PointerSumType Result;
- void *V = HelperT::template Lookup<N>::TraitsT::getAsVoidPointer(Pointer);
- assert((reinterpret_cast<uintptr_t>(V) & HelperT::TagMask) == 0 &&
- "Pointer is insufficiently aligned to store the discriminant!");
- Result.Value = reinterpret_cast<uintptr_t>(V) | N;
+ Result.set<N>(Pointer);
return Result;
}
- TagT getTag() const { return static_cast<TagT>(Value & HelperT::TagMask); }
+ /// Clear the value to null with the min tag type.
+ void clear() { set<HelperT::MinTag>(nullptr); }
+
+ TagT getTag() const {
+ return static_cast<TagT>(getOpaqueValue() & HelperT::TagMask);
+ }
template <TagT N> bool is() const { return N == getTag(); }
template <TagT N> typename HelperT::template Lookup<N>::PointerT get() const {
- void *P = is<N>() ? getImpl() : nullptr;
+ void *P = is<N>() ? getVoidPtr() : nullptr;
return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(P);
}
template <TagT N>
typename HelperT::template Lookup<N>::PointerT cast() const {
assert(is<N>() && "This instance has a different active member.");
- return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(getImpl());
+ return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(
+ getVoidPtr());
+ }
+
+ /// If the tag is zero and the pointer's value isn't changed when being
+ /// stored, get the address of the stored value type-punned to the zero-tag's
+ /// pointer type.
+ typename HelperT::template Lookup<HelperT::MinTag>::PointerT const *
+ getAddrOfZeroTagPointer() const {
+ return const_cast<PointerSumType *>(this)->getAddrOfZeroTagPointer();
}
- explicit operator bool() const { return Value & HelperT::PointerMask; }
- bool operator==(const PointerSumType &R) const { return Value == R.Value; }
- bool operator!=(const PointerSumType &R) const { return Value != R.Value; }
- bool operator<(const PointerSumType &R) const { return Value < R.Value; }
- bool operator>(const PointerSumType &R) const { return Value > R.Value; }
- bool operator<=(const PointerSumType &R) const { return Value <= R.Value; }
- bool operator>=(const PointerSumType &R) const { return Value >= R.Value; }
+ /// If the tag is zero and the pointer's value isn't changed when being
+ /// stored, get the address of the stored value type-punned to the zero-tag's
+ /// pointer type.
+ typename HelperT::template Lookup<HelperT::MinTag>::PointerT *
+ getAddrOfZeroTagPointer() {
+ static_assert(HelperT::MinTag == 0, "Non-zero minimum tag value!");
+ assert(is<HelperT::MinTag>() && "The active tag is not zero!");
+ // Store the initial value of the pointer when read out of our storage.
+ auto InitialPtr = get<HelperT::MinTag>();
+ // Now update the active member of the union to be the actual pointer-typed
+ // member so that accessing it indirectly through the returned address is
+ // valid.
+ Storage.MinTagPointer = InitialPtr;
+ // Finally, validate that this was a no-op as expected by reading it back
+ // out using the same underlying-storage read as above.
+ assert(InitialPtr == get<HelperT::MinTag>() &&
+ "Switching to typed storage changed the pointer returned!");
+ // Now we can correctly return an address to typed storage.
+ return &Storage.MinTagPointer;
+ }
+
+ explicit operator bool() const {
+ return getOpaqueValue() & HelperT::PointerMask;
+ }
+ bool operator==(const PointerSumType &R) const {
+ return getOpaqueValue() == R.getOpaqueValue();
+ }
+ bool operator!=(const PointerSumType &R) const {
+ return getOpaqueValue() != R.getOpaqueValue();
+ }
+ bool operator<(const PointerSumType &R) const {
+ return getOpaqueValue() < R.getOpaqueValue();
+ }
+ bool operator>(const PointerSumType &R) const {
+ return getOpaqueValue() > R.getOpaqueValue();
+ }
+ bool operator<=(const PointerSumType &R) const {
+ return getOpaqueValue() <= R.getOpaqueValue();
+ }
+ bool operator>=(const PointerSumType &R) const {
+ return getOpaqueValue() >= R.getOpaqueValue();
+ }
- uintptr_t getOpaqueValue() const { return Value; }
+ uintptr_t getOpaqueValue() const {
+ uintptr_t Value;
+ // Read the underlying storage of the union, regardless of the active
+ // member.
+ memcpy(&Value, &Storage, sizeof(Value));
+ return Value;
+ }
protected:
- void *getImpl() const {
- return reinterpret_cast<void *>(Value & HelperT::PointerMask);
+ void *getVoidPtr() const {
+ return reinterpret_cast<void *>(getOpaqueValue() & HelperT::PointerMask);
}
};
enum { NumTagBits = Min<MemberTs::TraitsT::NumLowBitsAvailable...>::value };
// Also compute the smallest discriminant and various masks for convenience.
+ constexpr static TagT MinTag =
+ static_cast<TagT>(Min<MemberTs::Tag...>::value);
enum : uint64_t {
- MinTag = Min<MemberTs::Tag...>::value,
PointerMask = static_cast<uint64_t>(-1) << NumTagBits,
TagMask = ~PointerMask
};
/// Allocate and initialize a register mask with @p NumRegister bits.
uint32_t *allocateRegMask();
- /// allocateMemRefsArray - Allocate an array to hold MachineMemOperand
- /// pointers. This array is owned by the MachineFunction.
- MachineInstr::mmo_iterator allocateMemRefsArray(unsigned long Num);
-
- /// extractLoadMemRefs - Allocate an array and populate it with just the
- /// load information from the given MachineMemOperand sequence.
- std::pair<MachineInstr::mmo_iterator,
- MachineInstr::mmo_iterator>
- extractLoadMemRefs(MachineInstr::mmo_iterator Begin,
- MachineInstr::mmo_iterator End);
-
- /// extractStoreMemRefs - Allocate an array and populate it with just the
- /// store information from the given MachineMemOperand sequence.
- std::pair<MachineInstr::mmo_iterator,
- MachineInstr::mmo_iterator>
- extractStoreMemRefs(MachineInstr::mmo_iterator Begin,
- MachineInstr::mmo_iterator End);
+ /// Allocate and construct an extra info structure for a `MachineInstr`.
+ ///
+ /// This is allocated on the function's allocator and so lives the life of
+ /// the function.
+ MachineInstr::ExtraInfo *
+ createMIExtraInfo(ArrayRef<MachineMemOperand *> MMOs,
+ MCSymbol *PreInstrSymbol = nullptr,
+ MCSymbol *PostInstrSymbol = nullptr);
/// Allocate a string and populate it with the given external symbol name.
const char *createExternalSymbolName(StringRef Name);
#define LLVM_CODEGEN_MACHINEINSTR_H
#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/PointerSumType.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/MC/MCInstrDesc.h"
+#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ArrayRecycler.h"
+#include "llvm/Support/TrailingObjects.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
: public ilist_node_with_parent<MachineInstr, MachineBasicBlock,
ilist_sentinel_tracking<true>> {
public:
- using mmo_iterator = MachineMemOperand **;
+ using mmo_iterator = ArrayRef<MachineMemOperand *>::iterator;
/// Flags to specify different kinds of comments to output in
/// assembly code. These flags carry semantic information not
// anything other than to convey comment
// information to AsmPrinter.
- uint8_t NumMemRefs = 0; // Information on memory references.
- // Note that MemRefs == nullptr, means 'don't know', not 'no memory access'.
- // Calling code must treat missing information conservatively. If the number
- // of memory operands required to be precise exceeds the maximum value of
- // NumMemRefs - currently 256 - we remove the operands entirely. Note also
- // that this is a non-owning reference to a shared copy on write buffer owned
- // by the MachineFunction and created via MF.allocateMemRefsArray.
- mmo_iterator MemRefs = nullptr;
+ /// Internal implementation detail class that provides out-of-line storage for
+ /// extra info used by the machine instruction when this info cannot be stored
+ /// in-line within the instruction itself.
+ ///
+ /// This has to be defined eagerly due to the implementation constraints of
+ /// `PointerSumType` where it is used.
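+  ///
+  /// The optional data is laid out as trailing objects: first all of the
+  /// `MachineMemOperand` pointers, then the pre-instruction symbol if present,
+  /// and finally the post-instruction symbol if present.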
+ class ExtraInfo final
+ : TrailingObjects<ExtraInfo, MachineMemOperand *, MCSymbol *> {
+ public:
+ static ExtraInfo *create(BumpPtrAllocator &Allocator,
+ ArrayRef<MachineMemOperand *> MMOs,
+ MCSymbol *PreInstrSymbol = nullptr,
+ MCSymbol *PostInstrSymbol = nullptr) {
+ bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
+ bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
+ auto *Result = new (Allocator.Allocate(
+ totalSizeToAlloc<MachineMemOperand *, MCSymbol *>(
+ MMOs.size(), HasPreInstrSymbol + HasPostInstrSymbol),
+ alignof(ExtraInfo)))
+ ExtraInfo(MMOs.size(), HasPreInstrSymbol, HasPostInstrSymbol);
+
+ // Copy the actual data into the trailing objects.
+ std::copy(MMOs.begin(), MMOs.end(),
+ Result->getTrailingObjects<MachineMemOperand *>());
+
+ if (HasPreInstrSymbol)
+ Result->getTrailingObjects<MCSymbol *>()[0] = PreInstrSymbol;
+ if (HasPostInstrSymbol)
+ Result->getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol] =
+ PostInstrSymbol;
+
+ return Result;
+ }
+
+ ArrayRef<MachineMemOperand *> getMMOs() const {
+ return makeArrayRef(getTrailingObjects<MachineMemOperand *>(), NumMMOs);
+ }
+
+ MCSymbol *getPreInstrSymbol() const {
+ return HasPreInstrSymbol ? getTrailingObjects<MCSymbol *>()[0] : nullptr;
+ }
+
+ MCSymbol *getPostInstrSymbol() const {
+ return HasPostInstrSymbol
+ ? getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol]
+ : nullptr;
+ }
+
+ private:
+ friend TrailingObjects;
+
+ // Description of the extra info, used to interpret the actual optional
+ // data appended.
+ //
+ // Note that this is not terribly space optimized. This leaves a great deal
+ // of flexibility to fit more in here later.
+ const int NumMMOs;
+ const bool HasPreInstrSymbol;
+ const bool HasPostInstrSymbol;
+
+ // Implement the `TrailingObjects` internal API.
+ size_t numTrailingObjects(OverloadToken<MachineMemOperand *>) const {
+ return NumMMOs;
+ }
+ size_t numTrailingObjects(OverloadToken<MCSymbol *>) const {
+ return HasPreInstrSymbol + HasPostInstrSymbol;
+ }
+
+ // Just a boring constructor to allow us to initialize the sizes. Always use
+ // the `create` routine above.
+ ExtraInfo(int NumMMOs, bool HasPreInstrSymbol, bool HasPostInstrSymbol)
+ : NumMMOs(NumMMOs), HasPreInstrSymbol(HasPreInstrSymbol),
+ HasPostInstrSymbol(HasPostInstrSymbol) {}
+ };
+
+ /// Enumeration of the kinds of inline extra info available. It is important
+ /// that the `MachineMemOperand` inline kind has a tag value of zero to make
+ /// it accessible as an `ArrayRef`.
+ enum ExtraInfoInlineKinds {
+ EIIK_MMO = 0,
+ EIIK_PreInstrSymbol,
+ EIIK_PostInstrSymbol,
+ EIIK_OutOfLine
+ };
+
+ // We store extra information about the instruction here. The common case is
+  // expected to be nothing or a single pointer (typically an MMO or a symbol).
+ // We work to optimize this common case by storing it inline here rather than
+ // requiring a separate allocation, but we fall back to an allocation when
+ // multiple pointers are needed.
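+  //
+  // Concretely, a lone MMO is stored with the `EIIK_MMO` tag, a lone pre- or
+  // post-instruction symbol with its corresponding symbol tag, and any other
+  // combination is moved into an out-of-line `ExtraInfo` allocation accessed
+  // through the `EIIK_OutOfLine` tag.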
+ PointerSumType<ExtraInfoInlineKinds,
+ PointerSumTypeMember<EIIK_MMO, MachineMemOperand *>,
+ PointerSumTypeMember<EIIK_PreInstrSymbol, MCSymbol *>,
+ PointerSumTypeMember<EIIK_PostInstrSymbol, MCSymbol *>,
+ PointerSumTypeMember<EIIK_OutOfLine, ExtraInfo *>>
+ Info;
DebugLoc debugLoc; // Source line information.
return I - operands_begin();
}
- /// Access to memory operands of the instruction
- mmo_iterator memoperands_begin() const { return MemRefs; }
- mmo_iterator memoperands_end() const { return MemRefs + NumMemRefs; }
+ /// Access to memory operands of the instruction. If there are none, that does
+  /// not imply anything about whether the instruction accesses memory. Instead,
+ /// the caller must behave conservatively.
+ ArrayRef<MachineMemOperand *> memoperands() const {
+ if (!Info)
+ return {};
+
+ if (Info.is<EIIK_MMO>())
+ return makeArrayRef(Info.getAddrOfZeroTagPointer(), 1);
+
+ if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
+ return EI->getMMOs();
+
+ return {};
+ }
+
+ /// Access to memory operands of the instruction.
+ ///
+ /// If `memoperands_begin() == memoperands_end()`, that does not imply
+  /// anything about whether the instruction accesses memory. Instead, the caller
+ /// must behave conservatively.
+ mmo_iterator memoperands_begin() const { return memoperands().begin(); }
+
+ /// Access to memory operands of the instruction.
+ ///
+ /// If `memoperands_begin() == memoperands_end()`, that does not imply
+  /// anything about whether the instruction accesses memory. Instead, the caller
+ /// must behave conservatively.
+ mmo_iterator memoperands_end() const { return memoperands().end(); }
+
/// Return true if we don't have any memory operands which describe the
/// memory access done by this instruction. If this is true, calling code
/// must be conservative.
- bool memoperands_empty() const { return NumMemRefs == 0; }
-
- iterator_range<mmo_iterator> memoperands() {
- return make_range(memoperands_begin(), memoperands_end());
- }
- iterator_range<mmo_iterator> memoperands() const {
- return make_range(memoperands_begin(), memoperands_end());
- }
+ bool memoperands_empty() const { return memoperands().empty(); }
/// Return true if this instruction has exactly one MachineMemOperand.
- bool hasOneMemOperand() const {
- return NumMemRefs == 1;
- }
+ bool hasOneMemOperand() const { return memoperands().size() == 1; }
/// Return the number of memory operands.
- unsigned getNumMemOperands() const { return NumMemRefs; }
+ unsigned getNumMemOperands() const { return memoperands().size(); }
+
+ /// Helper to extract a pre-instruction symbol if one has been added.
+ MCSymbol *getPreInstrSymbol() const {
+ if (!Info)
+ return nullptr;
+ if (MCSymbol *S = Info.get<EIIK_PreInstrSymbol>())
+ return S;
+ if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
+ return EI->getPreInstrSymbol();
+
+ return nullptr;
+ }
+
+ /// Helper to extract a post-instruction symbol if one has been added.
+ MCSymbol *getPostInstrSymbol() const {
+ if (!Info)
+ return nullptr;
+ if (MCSymbol *S = Info.get<EIIK_PostInstrSymbol>())
+ return S;
+ if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
+ return EI->getPostInstrSymbol();
+
+ return nullptr;
+ }
/// API for querying MachineInstr properties. They are the same as MCInstrDesc
/// queries but they are bundle aware.
/// fewer operand than it started with.
void RemoveOperand(unsigned OpNo);
+ /// Clear this MachineInstr's memory reference descriptor list. This resets
+ /// the memrefs to their most conservative state. This should be used only
+ /// as a last resort since it greatly pessimizes our knowledge of the memory
+ /// access performed by the instruction.
+ void dropMemRefs(MachineFunction &MF);
+
+ /// Assign this MachineInstr's memory reference descriptor list.
+ ///
+ /// Unlike other methods, this *will* allocate them into a new array
+ /// associated with the provided `MachineFunction`.
+ void setMemRefs(MachineFunction &MF, ArrayRef<MachineMemOperand *> MemRefs);
+
/// Add a MachineMemOperand to the machine instruction.
/// This function should be used only occasionally. The setMemRefs function
/// is the primary method for setting up a MachineInstr's MemRefs list.
void addMemOperand(MachineFunction &MF, MachineMemOperand *MO);
- /// Assign this MachineInstr's memory reference descriptor list.
- /// This does not transfer ownership.
- void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd) {
- setMemRefs(std::make_pair(NewMemRefs, NewMemRefsEnd-NewMemRefs));
- }
+ /// Clone another MachineInstr's memory reference descriptor list and replace
+ /// ours with it.
+ ///
+ /// Note that `*this` may be the incoming MI!
+ ///
+ /// Prefer this API whenever possible as it can avoid allocations in common
+ /// cases.
+ void cloneMemRefs(MachineFunction &MF, const MachineInstr &MI);
- /// Assign this MachineInstr's memory reference descriptor list. First
- /// element in the pair is the begin iterator/pointer to the array; the
- /// second is the number of MemoryOperands. This does not transfer ownership
- /// of the underlying memory.
- void setMemRefs(std::pair<mmo_iterator, unsigned> NewMemRefs) {
- MemRefs = NewMemRefs.first;
- NumMemRefs = uint8_t(NewMemRefs.second);
- assert(NumMemRefs == NewMemRefs.second &&
- "Too many memrefs - must drop memory operands");
- }
+  /// Clone the merge of multiple MachineInstrs' memory reference descriptor
+  /// lists and replace ours with it.
+ ///
+ /// Note that `*this` may be one of the incoming MIs!
+ ///
+ /// Prefer this API whenever possible as it can avoid allocations in common
+ /// cases.
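+  ///
+  /// For example, when fusing two memory instructions `I` and `Paired`
+  /// (iterators to the originals) into a newly built instruction `NewMI`:
+  /// \code
+  ///   NewMI.cloneMergedMemRefs(MF, {&*I, &*Paired});
+  /// \endcode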
+ void cloneMergedMemRefs(MachineFunction &MF,
+ ArrayRef<const MachineInstr *> MIs);
- /// Return a set of memrefs (begin iterator, size) which conservatively
- /// describe the memory behavior of both MachineInstrs. This is appropriate
- /// for use when merging two MachineInstrs into one. This routine does not
- /// modify the memrefs of the this MachineInstr.
- std::pair<mmo_iterator, unsigned> mergeMemRefsWith(const MachineInstr& Other);
+ /// Get or create a temporary symbol that will be emitted just prior to the
+ /// instruction itself.
+ ///
+ /// FIXME: This is not fully implemented yet.
+ MCSymbol *getOrCreatePreInstrTempSymbol(MCContext &MCCtx);
+
+ /// Get or create a temporary symbol that will be emitted just after the
+ /// instruction itself.
+ ///
+ /// FIXME: This is not fully implemented yet.
+ MCSymbol *getOrCreatePostInstrTempSymbol(MCContext &MCCtx);
/// Return the MIFlags which represent both MachineInstrs. This
/// should be used when merging two MachineInstrs into one. This routine does
/// not modify the MIFlags of this MachineInstr.
uint16_t mergeFlagsWith(const MachineInstr& Other) const;
- /// Clear this MachineInstr's memory reference descriptor list. This resets
- /// the memrefs to their most conservative state. This should be used only
- /// as a last resort since it greatly pessimizes our knowledge of the memory
- /// access performed by the instruction.
- void dropMemRefs() {
- MemRefs = nullptr;
- NumMemRefs = 0;
- }
-
/// Break any tie involving OpIdx.
void untieRegOperand(unsigned OpIdx) {
MachineOperand &MO = getOperand(OpIdx);
return *this;
}
- const MachineInstrBuilder &setMemRefs(MachineInstr::mmo_iterator b,
- MachineInstr::mmo_iterator e) const {
- MI->setMemRefs(b, e);
+ const MachineInstrBuilder &
+ setMemRefs(ArrayRef<MachineMemOperand *> MMOs) const {
+ MI->setMemRefs(*MF, MMOs);
return *this;
}
- const MachineInstrBuilder &setMemRefs(std::pair<MachineInstr::mmo_iterator,
- unsigned> MemOperandsRef) const {
- MI->setMemRefs(MemOperandsRef);
+ const MachineInstrBuilder &cloneMemRefs(const MachineInstr &OtherMI) const {
+ MI->cloneMemRefs(*MF, OtherMI);
+ return *this;
+ }
+
+ const MachineInstrBuilder &
+ cloneMergedMemRefs(ArrayRef<const MachineInstr *> OtherMIs) const {
+ MI->cloneMergedMemRefs(*MF, OtherMIs);
return *this;
}
// Merge MMOs from memory operations in the common block.
if (MBBICommon->mayLoad() || MBBICommon->mayStore())
- MBBICommon->setMemRefs(MBBICommon->mergeMemRefsWith(*MBBI));
+ MBBICommon->cloneMergedMemRefs(*MBB->getParent(), {&*MBBICommon, &*MBBI});
// Drop undef flags if they aren't present in all merged instructions.
for (unsigned I = 0, E = MBBICommon->getNumOperands(); I != E; ++I) {
MachineOperand &MO = MBBICommon->getOperand(I);
return;
MachinePointerInfo MPInfo(Global);
- MachineInstr::mmo_iterator MemRefs = MF->allocateMemRefsArray(1);
auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
MachineMemOperand::MODereferenceable;
- *MemRefs =
+ MachineMemOperand *MemRef =
MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
DL->getPointerABIAlignment(0));
- MIB.setMemRefs(MemRefs, MemRefs + 1);
+ MIB.setMemRefs({MemRef});
}
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
}
}
- MIB.setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ MIB.setMemRefs(MI->memoperands());
return MIB;
}
return true;
if (MemOperands.empty())
return false;
- MachineInstr::mmo_iterator MemRefs =
- MF.allocateMemRefsArray(MemOperands.size());
- std::copy(MemOperands.begin(), MemOperands.end(), MemRefs);
- MI->setMemRefs(MemRefs, MemRefs + MemOperands.size());
+ MI->setMemRefs(MF, MemOperands);
return false;
}
MMO->getOrdering(), MMO->getFailureOrdering());
}
-MachineInstr::mmo_iterator
-MachineFunction::allocateMemRefsArray(unsigned long Num) {
- return Allocator.Allocate<MachineMemOperand *>(Num);
-}
-
-std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator>
-MachineFunction::extractLoadMemRefs(MachineInstr::mmo_iterator Begin,
- MachineInstr::mmo_iterator End) {
- // Count the number of load mem refs.
- unsigned Num = 0;
- for (MachineInstr::mmo_iterator I = Begin; I != End; ++I)
- if ((*I)->isLoad())
- ++Num;
-
- // Allocate a new array and populate it with the load information.
- MachineInstr::mmo_iterator Result = allocateMemRefsArray(Num);
- unsigned Index = 0;
- for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) {
- if ((*I)->isLoad()) {
- if (!(*I)->isStore())
- // Reuse the MMO.
- Result[Index] = *I;
- else {
- // Clone the MMO and unset the store flag.
- MachineMemOperand *JustLoad =
- getMachineMemOperand((*I)->getPointerInfo(),
- (*I)->getFlags() & ~MachineMemOperand::MOStore,
- (*I)->getSize(), (*I)->getBaseAlignment(),
- (*I)->getAAInfo(), nullptr,
- (*I)->getSyncScopeID(), (*I)->getOrdering(),
- (*I)->getFailureOrdering());
- Result[Index] = JustLoad;
- }
- ++Index;
- }
- }
- return std::make_pair(Result, Result + Num);
-}
-
-std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator>
-MachineFunction::extractStoreMemRefs(MachineInstr::mmo_iterator Begin,
- MachineInstr::mmo_iterator End) {
- // Count the number of load mem refs.
- unsigned Num = 0;
- for (MachineInstr::mmo_iterator I = Begin; I != End; ++I)
- if ((*I)->isStore())
- ++Num;
-
- // Allocate a new array and populate it with the store information.
- MachineInstr::mmo_iterator Result = allocateMemRefsArray(Num);
- unsigned Index = 0;
- for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) {
- if ((*I)->isStore()) {
- if (!(*I)->isLoad())
- // Reuse the MMO.
- Result[Index] = *I;
- else {
- // Clone the MMO and unset the load flag.
- MachineMemOperand *JustStore =
- getMachineMemOperand((*I)->getPointerInfo(),
- (*I)->getFlags() & ~MachineMemOperand::MOLoad,
- (*I)->getSize(), (*I)->getBaseAlignment(),
- (*I)->getAAInfo(), nullptr,
- (*I)->getSyncScopeID(), (*I)->getOrdering(),
- (*I)->getFailureOrdering());
- Result[Index] = JustStore;
- }
- ++Index;
- }
- }
- return std::make_pair(Result, Result + Num);
+MachineInstr::ExtraInfo *
+MachineFunction::createMIExtraInfo(ArrayRef<MachineMemOperand *> MMOs,
+ MCSymbol *PreInstrSymbol,
+ MCSymbol *PostInstrSymbol) {
+ return MachineInstr::ExtraInfo::create(Allocator, MMOs, PreInstrSymbol,
+ PostInstrSymbol);
}
const char *MachineFunction::createExternalSymbolName(StringRef Name) {
/// MachineInstr ctor - Copies MachineInstr arg exactly
///
MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
- : MCID(&MI.getDesc()), NumMemRefs(MI.NumMemRefs), MemRefs(MI.MemRefs),
- debugLoc(MI.getDebugLoc()) {
+ : MCID(&MI.getDesc()), Info(MI.Info), debugLoc(MI.getDebugLoc()) {
assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
CapOperands = OperandCapacity::get(MI.getNumOperands());
--NumOperands;
}
-/// addMemOperand - Add a MachineMemOperand to the machine instruction.
-/// This function should be used only occasionally. The setMemRefs function
-/// is the primary method for setting up a MachineInstr's MemRefs list.
+void MachineInstr::dropMemRefs(MachineFunction &MF) {
+ if (memoperands_empty())
+ return;
+
+ // See if we can just drop all of our extra info.
+ if (!getPreInstrSymbol() && !getPostInstrSymbol()) {
+ Info.clear();
+ return;
+ }
+ if (!getPostInstrSymbol()) {
+ Info.set<EIIK_PreInstrSymbol>(getPreInstrSymbol());
+ return;
+ }
+ if (!getPreInstrSymbol()) {
+ Info.set<EIIK_PostInstrSymbol>(getPostInstrSymbol());
+ return;
+ }
+
+ // Otherwise allocate a fresh extra info with just these symbols.
+ Info.set<EIIK_OutOfLine>(
+ MF.createMIExtraInfo({}, getPreInstrSymbol(), getPostInstrSymbol()));
+}
+
+void MachineInstr::setMemRefs(MachineFunction &MF,
+ ArrayRef<MachineMemOperand *> MMOs) {
+ if (MMOs.empty()) {
+ dropMemRefs(MF);
+ return;
+ }
+
+ // Try to store a single MMO inline.
+ if (MMOs.size() == 1 && !getPreInstrSymbol() && !getPostInstrSymbol()) {
+ Info.set<EIIK_MMO>(MMOs[0]);
+ return;
+ }
+
+ // Otherwise create an extra info struct with all of our info.
+ Info.set<EIIK_OutOfLine>(
+ MF.createMIExtraInfo(MMOs, getPreInstrSymbol(), getPostInstrSymbol()));
+}
+
void MachineInstr::addMemOperand(MachineFunction &MF,
MachineMemOperand *MO) {
- mmo_iterator OldMemRefs = MemRefs;
- unsigned OldNumMemRefs = NumMemRefs;
+ SmallVector<MachineMemOperand *, 2> MMOs;
+ MMOs.append(memoperands_begin(), memoperands_end());
+ MMOs.push_back(MO);
+ setMemRefs(MF, MMOs);
+}
- unsigned NewNum = NumMemRefs + 1;
- mmo_iterator NewMemRefs = MF.allocateMemRefsArray(NewNum);
+void MachineInstr::cloneMemRefs(MachineFunction &MF, const MachineInstr &MI) {
+ if (this == &MI)
+ // Nothing to do for a self-clone!
+ return;
+
+ assert(&MF == MI.getMF() &&
+         "Invalid machine functions when cloning memory references!");
+ // See if we can just steal the extra info already allocated for the
+ // instruction. We can do this whenever the pre- and post-instruction symbols
+ // are the same (including null).
+ if (getPreInstrSymbol() == MI.getPreInstrSymbol() &&
+ getPostInstrSymbol() == MI.getPostInstrSymbol()) {
+ Info = MI.Info;
+ return;
+ }
- std::copy(OldMemRefs, OldMemRefs + OldNumMemRefs, NewMemRefs);
- NewMemRefs[NewNum - 1] = MO;
- setMemRefs(NewMemRefs, NewMemRefs + NewNum);
+ // Otherwise, fall back on a copy-based clone.
+ setMemRefs(MF, MI.memoperands());
}
/// Check to see if the MMOs pointed to by the two MemRefs arrays are
/// identical.
-static bool hasIdenticalMMOs(const MachineInstr &MI1, const MachineInstr &MI2) {
- auto I1 = MI1.memoperands_begin(), E1 = MI1.memoperands_end();
- auto I2 = MI2.memoperands_begin(), E2 = MI2.memoperands_end();
- if ((E1 - I1) != (E2 - I2))
+static bool hasIdenticalMMOs(ArrayRef<MachineMemOperand *> LHS,
+ ArrayRef<MachineMemOperand *> RHS) {
+ if (LHS.size() != RHS.size())
return false;
- for (; I1 != E1; ++I1, ++I2) {
- if (**I1 != **I2)
- return false;
+
+ auto LHSPointees = make_pointee_range(LHS);
+ auto RHSPointees = make_pointee_range(RHS);
+ return std::equal(LHSPointees.begin(), LHSPointees.end(),
+ RHSPointees.begin());
+}
+
+void MachineInstr::cloneMergedMemRefs(MachineFunction &MF,
+ ArrayRef<const MachineInstr *> MIs) {
+ // Try handling easy numbers of MIs with simpler mechanisms.
+ if (MIs.empty()) {
+ dropMemRefs(MF);
+ return;
}
- return true;
+ if (MIs.size() == 1) {
+ cloneMemRefs(MF, *MIs[0]);
+ return;
+ }
+ // Because an empty memoperands list provides *no* information and must be
+ // handled conservatively (assuming the instruction can do anything), the only
+ // way to merge with it is to drop all other memoperands.
+ if (MIs[0]->memoperands_empty()) {
+ dropMemRefs(MF);
+ return;
+ }
+
+ // Handle the general case.
+ SmallVector<MachineMemOperand *, 2> MergedMMOs;
+ // Start with the first instruction.
+ assert(&MF == MIs[0]->getMF() &&
+ "Invalid machine functions when cloning memory references!");
+ MergedMMOs.append(MIs[0]->memoperands_begin(), MIs[0]->memoperands_end());
+ // Now walk all the other instructions and accumulate any different MMOs.
+ for (const MachineInstr &MI : make_pointee_range(MIs.slice(1))) {
+ assert(&MF == MI.getMF() &&
+ "Invalid machine functions when cloning memory references!");
+
+ // Skip MIs with identical operands to the first. This is a somewhat
+ // arbitrary hack but will catch common cases without being quadratic.
+ // TODO: We could fully implement merge semantics here if needed.
+ if (hasIdenticalMMOs(MIs[0]->memoperands(), MI.memoperands()))
+ continue;
+
+ // Because an empty memoperands list provides *no* information and must be
+ // handled conservatively (assuming the instruction can do anything), the
+ // only way to merge with it is to drop all other memoperands.
+ if (MI.memoperands_empty()) {
+ dropMemRefs(MF);
+ return;
+ }
+
+ // Otherwise accumulate these into our temporary buffer of the merged state.
+ MergedMMOs.append(MI.memoperands_begin(), MI.memoperands_end());
+ }
+
+ setMemRefs(MF, MergedMMOs);
}
-std::pair<MachineInstr::mmo_iterator, unsigned>
-MachineInstr::mergeMemRefsWith(const MachineInstr& Other) {
-
- // If either of the incoming memrefs are empty, we must be conservative and
- // treat this as if we've exhausted our space for memrefs and dropped them.
- if (memoperands_empty() || Other.memoperands_empty())
- return std::make_pair(nullptr, 0);
-
- // If both instructions have identical memrefs, we don't need to merge them.
- // Since many instructions have a single memref, and we tend to merge things
- // like pairs of loads from the same location, this catches a large number of
- // cases in practice.
- if (hasIdenticalMMOs(*this, Other))
- return std::make_pair(MemRefs, NumMemRefs);
-
- // TODO: consider uniquing elements within the operand lists to reduce
- // space usage and fall back to conservative information less often.
- size_t CombinedNumMemRefs = NumMemRefs + Other.NumMemRefs;
-
- // If we don't have enough room to store this many memrefs, be conservative
- // and drop them. Otherwise, we'd fail asserts when trying to add them to
- // the new instruction.
- if (CombinedNumMemRefs != uint8_t(CombinedNumMemRefs))
- return std::make_pair(nullptr, 0);
-
- MachineFunction *MF = getMF();
- mmo_iterator MemBegin = MF->allocateMemRefsArray(CombinedNumMemRefs);
- mmo_iterator MemEnd = std::copy(memoperands_begin(), memoperands_end(),
- MemBegin);
- MemEnd = std::copy(Other.memoperands_begin(), Other.memoperands_end(),
- MemEnd);
- assert(MemEnd - MemBegin == (ptrdiff_t)CombinedNumMemRefs &&
- "missing memrefs");
-
- return std::make_pair(MemBegin, CombinedNumMemRefs);
+MCSymbol *MachineInstr::getOrCreatePreInstrTempSymbol(MCContext &MCCtx) {
+ MCSymbol *S = getPreInstrSymbol();
+ if (S)
+ return S;
+
+ // Create a new temp symbol.
+ S = MCCtx.createTempSymbol();
+
+ if (!Info) {
+ // If we don't have any other extra info, we can store this inline.
+ Info.set<EIIK_PreInstrSymbol>(S);
+ return S;
+ }
+
+  // Otherwise, allocate a full set of extra info.
+ Info.set<EIIK_OutOfLine>(
+ getMF()->createMIExtraInfo(memoperands(), S, getPostInstrSymbol()));
+
+ return S;
+}
+
+MCSymbol *MachineInstr::getOrCreatePostInstrTempSymbol(MCContext &MCCtx) {
+ MCSymbol *S = getPostInstrSymbol();
+ if (S)
+ return S;
+
+ // Create a new temp symbol.
+ S = MCCtx.createTempSymbol();
+
+ if (!Info) {
+ // If we don't have any other extra info, we can store this inline.
+ Info.set<EIIK_PostInstrSymbol>(S);
+ return S;
+ }
+
+  // Otherwise, allocate a full set of extra info.
+ Info.set<EIIK_OutOfLine>(
+ getMF()->createMIExtraInfo(memoperands(), getPreInstrSymbol(), S));
+ return S;
}
uint16_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const {
for (unsigned Str : OF.Sequence) {
MachineInstr *NewMI =
MF.CloneMachineInstr(Mapper.IntegerInstructionMap.find(Str)->second);
- NewMI->dropMemRefs();
+ NewMI->dropMemRefs(MF);
// Don't keep debug information for outlined instructions.
NewMI->setDebugLoc(DebugLoc());
return;
// If the instruction has memory operands, then adjust the offset
// when the instruction appears in different stages.
- unsigned NumRefs = NewMI.memoperands_end() - NewMI.memoperands_begin();
- if (NumRefs == 0)
+ if (NewMI.memoperands_empty())
return;
- MachineInstr::mmo_iterator NewMemRefs = MF.allocateMemRefsArray(NumRefs);
- unsigned Refs = 0;
+ SmallVector<MachineMemOperand *, 2> NewMMOs;
for (MachineMemOperand *MMO : NewMI.memoperands()) {
if (MMO->isVolatile() || (MMO->isInvariant() && MMO->isDereferenceable()) ||
(!MMO->getValue())) {
- NewMemRefs[Refs++] = MMO;
+ NewMMOs.push_back(MMO);
continue;
}
unsigned Delta;
if (Num != UINT_MAX && computeDelta(OldMI, Delta)) {
int64_t AdjOffset = Delta * Num;
- NewMemRefs[Refs++] =
- MF.getMachineMemOperand(MMO, AdjOffset, MMO->getSize());
+ NewMMOs.push_back(
+ MF.getMachineMemOperand(MMO, AdjOffset, MMO->getSize()));
} else {
- NewMI.dropMemRefs();
+ NewMI.dropMemRefs(MF);
return;
}
}
- NewMI.setMemRefs(NewMemRefs, NewMemRefs + NumRefs);
+ NewMI.setMemRefs(MF, NewMMOs);
}
/// Clone the instruction for the new pipelined loop and update the
MIB.addReg(ScratchRegs[i], RegState::ImplicitDefine |
RegState::EarlyClobber);
- // Transfer all of the memory reference descriptions of this instruction.
- ArrayRef<MachineMemOperand *> SDNodeMemRefs =
- cast<MachineSDNode>(Node)->memoperands();
- MachineMemOperand **MemRefs = MF->allocateMemRefsArray(SDNodeMemRefs.size());
- std::copy(SDNodeMemRefs.begin(), SDNodeMemRefs.end(), MemRefs);
- MIB.setMemRefs({MemRefs, SDNodeMemRefs.size()});
+ // Set the memory reference descriptions of this instruction now that it is
+ // part of the function.
+ MIB.setMemRefs(cast<MachineSDNode>(Node)->memoperands());
// Insert the instruction into position in the block. This needs to
// happen before any custom inserter hook is called so that the
}
// We adjust AliasAnalysis information for merged stack slots.
- MachineInstr::mmo_iterator NewMemOps =
- MF->allocateMemRefsArray(I.getNumMemOperands());
- unsigned MemOpIdx = 0;
+ SmallVector<MachineMemOperand *, 2> NewMMOs;
bool ReplaceMemOps = false;
for (MachineMemOperand *MMO : I.memoperands()) {
// If this memory location can be a slot remapped here,
}
}
if (MayHaveConflictingAAMD) {
- NewMemOps[MemOpIdx++] = MF->getMachineMemOperand(MMO, AAMDNodes());
+ NewMMOs.push_back(MF->getMachineMemOperand(MMO, AAMDNodes()));
ReplaceMemOps = true;
+ } else {
+ NewMMOs.push_back(MMO);
}
- else
- NewMemOps[MemOpIdx++] = MMO;
}
// If any memory operand is updated, set memory references of
// this instruction.
if (ReplaceMemOps)
- I.setMemRefs(std::make_pair(NewMemOps, I.getNumMemOperands()));
+ I.setMemRefs(*MF, NewMMOs);
}
// Update the location of C++ catch objects for the MSVC personality routine.
}
if (NewMI) {
- NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ NewMI->setMemRefs(MF, MI.memoperands());
// Add a memory operand, foldMemoryOperandImpl doesn't do that.
assert((!(Flags & MachineMemOperand::MOStore) ||
NewMI->mayStore()) &&
// Copy the memoperands from the load to the folded instruction.
if (MI.memoperands_empty()) {
- NewMI->setMemRefs(LoadMI.memoperands_begin(), LoadMI.memoperands_end());
+ NewMI->setMemRefs(MF, LoadMI.memoperands());
} else {
// Handle the rare case of folding multiple loads.
- NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ NewMI->setMemRefs(MF, MI.memoperands());
for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
E = LoadMI.memoperands_end();
I != E; ++I) {
MIB.add(MI->getOperand(i));
// Inherit previous memory operands.
- MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ MIB.cloneMemRefs(*MI);
assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");
// Add a new memory operand for this FI.
MIB.addImm(CSStackSizeIncImm);
MIB.setMIFlags(MBBI->getFlags());
- MIB.setMemRefs(MBBI->memoperands_begin(), MBBI->memoperands_end());
+ MIB.setMemRefs(MBBI->memoperands());
return std::prev(MBB.erase(MBBI));
}
.addReg(isNarrowStore(Opc) ? AArch64::WZR : AArch64::XZR)
.add(BaseRegOp)
.addImm(OffsetImm)
- .setMemRefs(I->mergeMemRefsWith(*MergeMI))
+ .cloneMergedMemRefs({&*I, &*MergeMI})
.setMIFlags(I->mergeFlagsWith(*MergeMI));
(void)MIB;
.add(RegOp1)
.add(BaseRegOp)
.addImm(OffsetImm)
- .setMemRefs(I->mergeMemRefsWith(*Paired))
+ .cloneMergedMemRefs({&*I, &*Paired})
.setMIFlags(I->mergeFlagsWith(*Paired));
(void)MIB;
.add(getLdStRegOp(*I))
.add(getLdStBaseOp(*I))
.addImm(Value)
- .setMemRefs(I->memoperands_begin(), I->memoperands_end())
+ .setMemRefs(I->memoperands())
.setMIFlags(I->mergeFlagsWith(*Update));
} else {
// Paired instruction.
.add(getLdStRegOp(*I, 1))
.add(getLdStBaseOp(*I))
.addImm(Value / Scale)
- .setMemRefs(I->memoperands_begin(), I->memoperands_end())
+ .setMemRefs(I->memoperands())
.setMIFlags(I->mergeFlagsWith(*Update));
}
(void)MIB;
for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I)
MIB.add(MI.getOperand(I));
- MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.cloneMemRefs(MI);
MI.eraseFromParent();
return BB;
}
MIB.addImm(TFE->getImm());
}
- MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.cloneMemRefs(MI);
Addr64 = MIB;
} else {
// Atomics with return.
.add(*SOffset)
.add(*Offset)
.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc))
- .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ .cloneMemRefs(MI);
}
MI.removeFromParent();
}
MachineInstr *NewInstr =
- BuildMI(*MBB, Inst, Inst.getDebugLoc(),
- get(AMDGPU::BUFFER_LOAD_DWORD_OFFEN), VDst)
- .add(*VAddr) // vaddr
- .add(*getNamedOperand(Inst, AMDGPU::OpName::sbase)) // srsrc
- .addImm(0) // soffset
- .addImm(Offset) // offset
- .addImm(getNamedOperand(Inst, AMDGPU::OpName::glc)->getImm())
- .addImm(0) // slc
- .addImm(0) // tfe
- .setMemRefs(Inst.memoperands_begin(), Inst.memoperands_end())
- .getInstr();
+ BuildMI(*MBB, Inst, Inst.getDebugLoc(),
+ get(AMDGPU::BUFFER_LOAD_DWORD_OFFEN), VDst)
+ .add(*VAddr) // vaddr
+ .add(*getNamedOperand(Inst, AMDGPU::OpName::sbase)) // srsrc
+ .addImm(0) // soffset
+ .addImm(Offset) // offset
+ .addImm(getNamedOperand(Inst, AMDGPU::OpName::glc)->getImm())
+ .addImm(0) // slc
+ .addImm(0) // tfe
+ .cloneMemRefs(Inst)
+ .getInstr();
MRI.replaceRegWith(getNamedOperand(Inst, AMDGPU::OpName::sdst)->getReg(),
VDst);
.addReg(AddrReg->getReg());
}
- MachineInstrBuilder Read2 =
- BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
- .addReg(BaseReg, BaseRegFlags) // addr
- .addImm(NewOffset0) // offset0
- .addImm(NewOffset1) // offset1
- .addImm(0) // gds
- .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));
+ MachineInstrBuilder Read2 = BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
+ .addReg(BaseReg, BaseRegFlags) // addr
+ .addImm(NewOffset0) // offset0
+ .addImm(NewOffset1) // offset1
+ .addImm(0) // gds
+ .cloneMergedMemRefs({&*CI.I, &*CI.Paired});
(void)Read2;
.addReg(AddrReg->getReg());
}
- MachineInstrBuilder Write2 =
- BuildMI(*MBB, CI.Paired, DL, Write2Desc)
- .addReg(BaseReg, BaseRegFlags) // addr
- .add(*Data0) // data0
- .add(*Data1) // data1
- .addImm(NewOffset0) // offset0
- .addImm(NewOffset1) // offset1
- .addImm(0) // gds
- .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));
+ MachineInstrBuilder Write2 = BuildMI(*MBB, CI.Paired, DL, Write2Desc)
+ .addReg(BaseReg, BaseRegFlags) // addr
+ .add(*Data0) // data0
+ .add(*Data1) // data1
+ .addImm(NewOffset0) // offset0
+ .addImm(NewOffset1) // offset1
+ .addImm(0) // gds
+ .cloneMergedMemRefs({&*CI.I, &*CI.Paired});
moveInstsAfter(Write2, CI.InstsToMove);
.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
.addImm(MergedOffset) // offset
.addImm(CI.GLC0) // glc
- .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));
+ .cloneMergedMemRefs({&*CI.I, &*CI.Paired});
unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;
.addImm(CI.GLC0) // glc
.addImm(CI.SLC0) // slc
.addImm(0) // tfe
- .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));
+ .cloneMergedMemRefs({&*CI.I, &*CI.Paired});
unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;
MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
.addImm(std::min(CI.Offset0, CI.Offset1)) // offset
- .addImm(CI.GLC0) // glc
- .addImm(CI.SLC0) // slc
- .addImm(0) // tfe
- .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));
+ .addImm(CI.GLC0) // glc
+ .addImm(CI.SLC0) // slc
+ .addImm(0) // tfe
+ .cloneMergedMemRefs({&*CI.I, &*CI.Paired});
moveInstsAfter(MIB, CI.InstsToMove);
return false;
const MachineOperand *Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata);
- MachineInstrBuilder NewMI = BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
- .add(*Reg)
- .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
- .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
- .addImm(Offset)
- .addImm(0) // glc
- .addImm(0) // slc
- .addImm(0) // tfe
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ MachineInstrBuilder NewMI =
+ BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
+ .add(*Reg)
+ .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
+ .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
+ .addImm(Offset)
+ .addImm(0) // glc
+ .addImm(0) // slc
+ .addImm(0) // tfe
+ .cloneMemRefs(*MI);
const MachineOperand *VDataIn = TII->getNamedOperand(*MI,
AMDGPU::OpName::vdata_in);
MachineFunction &MF = *MBB.getParent();
unsigned CPI = Orig.getOperand(1).getIndex();
unsigned PCLabelId = duplicateCPV(MF, CPI);
- MachineInstrBuilder MIB =
- BuildMI(MBB, I, Orig.getDebugLoc(), get(Opcode), DestReg)
- .addConstantPoolIndex(CPI)
- .addImm(PCLabelId);
- MIB->setMemRefs(Orig.memoperands_begin(), Orig.memoperands_end());
+ BuildMI(MBB, I, Orig.getDebugLoc(), get(Opcode), DestReg)
+ .addConstantPoolIndex(CPI)
+ .addImm(PCLabelId)
+ .cloneMemRefs(Orig);
break;
}
}
MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg);
MIB.addReg(Reg, RegState::Kill)
- .addImm(0)
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end())
- .add(predOps(ARMCC::AL));
+ .addImm(0)
+ .cloneMemRefs(*MI)
+ .add(predOps(ARMCC::AL));
}
bool
TransferImpOps(MI, MIB, MIB);
// Transfer memoperands.
- MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.cloneMemRefs(MI);
MI.eraseFromParent();
}
TransferImpOps(MI, MIB, MIB);
// Transfer memoperands.
- MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.cloneMemRefs(MI);
MI.eraseFromParent();
}
MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead));
TransferImpOps(MI, MIB, MIB);
// Transfer memoperands.
- MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.cloneMemRefs(MI);
MI.eraseFromParent();
}
unsigned SOImmValV2 = ARM_AM::getSOImmTwoPartSecond(ImmVal);
LO16 = LO16.addImm(SOImmValV1);
HI16 = HI16.addImm(SOImmValV2);
- LO16->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- HI16->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ LO16.cloneMemRefs(MI);
+ HI16.cloneMemRefs(MI);
LO16.addImm(Pred).addReg(PredReg).add(condCodeOp());
HI16.addImm(Pred).addReg(PredReg).add(condCodeOp());
if (isCC)
}
}
- LO16->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- HI16->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ LO16.cloneMemRefs(MI);
+ HI16.cloneMemRefs(MI);
LO16.addImm(Pred).addReg(PredReg);
HI16.addImm(Pred).addReg(PredReg);
MIB.addExternalSymbol("__aeabi_read_tp", 0);
}
- MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.cloneMemRefs(MI);
TransferImpOps(MI, MIB, MIB);
MI.eraseFromParent();
return true;
BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewLdOpc), DstReg)
.add(MI.getOperand(1))
.add(predOps(ARMCC::AL));
- MIB1->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB1.cloneMemRefs(MI);
MachineInstrBuilder MIB2 =
BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::tPICADD))
.addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
if (isARM) {
MIB3.add(predOps(ARMCC::AL));
if (Opcode == ARM::MOV_ga_pcrel_ldr)
- MIB3->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB3.cloneMemRefs(MI);
}
TransferImpOps(MI, MIB1, MIB3);
MI.eraseFromParent();
// Add an implicit def for the super-register.
MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead));
TransferImpOps(MI, MIB, MIB);
- MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.cloneMemRefs(MI);
MI.eraseFromParent();
return true;
}
MIB->addRegisterKilled(SrcReg, TRI, true);
TransferImpOps(MI, MIB, MIB);
- MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.cloneMemRefs(MI);
MI.eraseFromParent();
return true;
}
BuildMI(MBB, MI, DL, get(ARM::LDRi12), Reg)
.addReg(Reg, RegState::Kill)
.addImm(0)
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end())
+ .cloneMemRefs(*MI)
.add(predOps(ARMCC::AL));
}
MIB.add(MI->getOperand(OpNum));
// Transfer memoperands.
- MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ MIB.setMemRefs(MI->memoperands());
MBB.erase(MBBI);
return true;
// Transfer implicit operands.
for (const MachineOperand &MO : MI.implicit_operands())
MIB.add(MO);
- MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.setMemRefs(MI.memoperands());
MBB.erase(MBBI);
return true;
if (!isT2)
MIB.addReg(0);
MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
- MIB.setMemRefs(Op0->mergeMemRefsWith(*Op1));
+ MIB.cloneMergedMemRefs({Op0, Op1});
LLVM_DEBUG(dbgs() << "Formed " << *MIB << "\n");
++NumLDRDFormed;
} else {
if (!isT2)
MIB.addReg(0);
MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
- MIB.setMemRefs(Op0->mergeMemRefsWith(*Op1));
+ MIB.cloneMergedMemRefs({Op0, Op1});
LLVM_DEBUG(dbgs() << "Formed " << *MIB << "\n");
++NumSTRDFormed;
}
.addReg(Rt, IsStore ? 0 : RegState::Define);
// Transfer memoperands.
- MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ MIB.setMemRefs(MI->memoperands());
// Transfer MI flags.
MIB.setMIFlags(MI->getFlags());
MIB.add(MI->getOperand(OpNum));
// Transfer memoperands.
- MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ MIB.setMemRefs(MI->memoperands());
// Transfer MI flags.
MIB.setMIFlags(MI->getFlags());
llvm_unreachable("Unknown operand type!");
}
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
buildMI(MBB, MBBI, AVR::POPRd).addReg(DstLoReg);
}
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
.addReg(SrcReg, RegState::Define | getDeadRegState(SrcIsDead))
.addReg(SrcReg, RegState::Kill);
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
.addReg(SrcReg, RegState::Define | getDeadRegState(SrcIsDead))
.addReg(SrcReg, RegState::Kill);
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
buildMI(MBB, MBBI, AVR::POPRd).addReg(DstLoReg);
}
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
buildMI(MBB, MBBI, AVR::POPRd).addReg(DstLoReg);
}
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
MIBLO.addReg(SrcLoReg, getKillRegState(SrcIsKill));
MIBHI.addReg(SrcHiReg, getKillRegState(SrcIsKill));
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
.addImm(1)
.addReg(SrcHiReg, getKillRegState(SrcIsKill));
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
.addReg(SrcHiReg, getKillRegState(SrcIsKill))
.addImm(Imm);
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
.addReg(SrcLoReg, getKillRegState(SrcIsKill))
.addImm(Imm);
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
.addImm(Imm + 1)
.addReg(SrcHiReg, getKillRegState(SrcIsKill));
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
.addReg(DstHiReg, RegState::Define | getDeadRegState(DstIsDead))
.addImm(Imm + 1);
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
.addImm(Imm)
.addReg(SrcLoReg, getKillRegState(SrcIsKill));
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
else
MIB.add(MachineOperand(ExtR));
}
- MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.cloneMemRefs(MI);
MBB.erase(MI);
return true;
}
// Add the stored value for stores.
if (MI.mayStore())
MIB.add(getStoredValueOp(MI));
- MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.cloneMemRefs(MI);
MBB.erase(MI);
return true;
}
// Add the stored value for stores.
if (MI.mayStore())
MIB.add(getStoredValueOp(MI));
- MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.cloneMemRefs(MI);
MBB.erase(MI);
return true;
}
MIB.add(MO);
// Set memory references.
- MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.cloneMemRefs(*MI);
MI->eraseFromParent();
return;
MB.add(MO);
Ox++;
}
-
- MachineFunction &MF = *B.getParent();
- MachineInstr::mmo_iterator I = MI.memoperands_begin();
- unsigned NR = std::distance(I, MI.memoperands_end());
- MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(NR);
- for (unsigned i = 0; i < NR; ++i)
- MemRefs[i] = *I++;
- MB.setMemRefs(MemRefs, MemRefs+NR);
+ MB.cloneMemRefs(MI);
MachineInstr *NewI = MB;
NewI->clearKillInfo();
// S2_storeri_io FI, 0, TmpR
BuildMI(B, It, DL, HII.get(Hexagon::S2_storeri_io))
- .addFrameIndex(FI)
- .addImm(0)
- .addReg(TmpR, RegState::Kill)
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ .addFrameIndex(FI)
+ .addImm(0)
+ .addReg(TmpR, RegState::Kill)
+ .cloneMemRefs(*MI);
NewRegs.push_back(TmpR);
B.erase(It);
// TmpR = L2_loadri_io FI, 0
unsigned TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
BuildMI(B, It, DL, HII.get(Hexagon::L2_loadri_io), TmpR)
- .addFrameIndex(FI)
- .addImm(0)
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ .addFrameIndex(FI)
+ .addImm(0)
+ .cloneMemRefs(*MI);
// DstR = C2_tfrrp TmpR if DstR is a predicate register
// DstR = A2_tfrrcr TmpR if DstR is a modifier register
StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
: Hexagon::V6_vS32Ub_ai;
BuildMI(B, It, DL, HII.get(StoreOpc))
- .addFrameIndex(FI)
- .addImm(0)
- .addReg(SrcLo, getKillRegState(IsKill))
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ .addFrameIndex(FI)
+ .addImm(0)
+ .addReg(SrcLo, getKillRegState(IsKill))
+ .cloneMemRefs(*MI);
}
// Store high part.
StoreOpc = NeedAlign <= MinAlign(HasAlign, Size) ? Hexagon::V6_vS32b_ai
: Hexagon::V6_vS32Ub_ai;
BuildMI(B, It, DL, HII.get(StoreOpc))
- .addFrameIndex(FI)
- .addImm(Size)
- .addReg(SrcHi, getKillRegState(IsKill))
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ .addFrameIndex(FI)
+ .addImm(Size)
+ .addReg(SrcHi, getKillRegState(IsKill))
+ .cloneMemRefs(*MI);
}
B.erase(It);
LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
: Hexagon::V6_vL32Ub_ai;
BuildMI(B, It, DL, HII.get(LoadOpc), DstLo)
- .addFrameIndex(FI)
- .addImm(0)
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ .addFrameIndex(FI)
+ .addImm(0)
+ .cloneMemRefs(*MI);
// Load high part.
LoadOpc = NeedAlign <= MinAlign(HasAlign, Size) ? Hexagon::V6_vL32b_ai
: Hexagon::V6_vL32Ub_ai;
BuildMI(B, It, DL, HII.get(LoadOpc), DstHi)
- .addFrameIndex(FI)
- .addImm(Size)
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ .addFrameIndex(FI)
+ .addImm(Size)
+ .cloneMemRefs(*MI);
B.erase(It);
return true;
unsigned StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
: Hexagon::V6_vS32Ub_ai;
BuildMI(B, It, DL, HII.get(StoreOpc))
- .addFrameIndex(FI)
- .addImm(0)
- .addReg(SrcR, getKillRegState(IsKill))
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ .addFrameIndex(FI)
+ .addImm(0)
+ .addReg(SrcR, getKillRegState(IsKill))
+ .cloneMemRefs(*MI);
B.erase(It);
return true;
unsigned LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
: Hexagon::V6_vL32Ub_ai;
BuildMI(B, It, DL, HII.get(LoadOpc), DstR)
- .addFrameIndex(FI)
- .addImm(0)
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ .addFrameIndex(FI)
+ .addImm(0)
+ .cloneMemRefs(*MI);
B.erase(It);
return true;
unsigned NewOpc = Aligned ? Hexagon::V6_vS32b_ai : Hexagon::V6_vS32Ub_ai;
unsigned Offset = HRI.getSpillSize(Hexagon::HvxVRRegClass);
- MachineInstr *MI1New =
- BuildMI(MBB, MI, DL, get(NewOpc))
- .add(MI.getOperand(0))
- .addImm(MI.getOperand(1).getImm())
- .addReg(SrcSubLo)
- .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MachineInstr *MI1New = BuildMI(MBB, MI, DL, get(NewOpc))
+ .add(MI.getOperand(0))
+ .addImm(MI.getOperand(1).getImm())
+ .addReg(SrcSubLo)
+ .cloneMemRefs(MI);
MI1New->getOperand(0).setIsKill(false);
BuildMI(MBB, MI, DL, get(NewOpc))
.add(MI.getOperand(0))
// The Vectors are indexed in multiples of vector size.
.addImm(MI.getOperand(1).getImm() + Offset)
.addReg(SrcSubHi)
- .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ .cloneMemRefs(MI);
MBB.erase(MI);
return true;
}
MachineInstr *MI1New = BuildMI(MBB, MI, DL, get(NewOpc),
HRI.getSubReg(DstReg, Hexagon::vsub_lo))
- .add(MI.getOperand(1))
- .addImm(MI.getOperand(2).getImm())
- .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ .add(MI.getOperand(1))
+ .addImm(MI.getOperand(2).getImm())
+ .cloneMemRefs(MI);
MI1New->getOperand(1).setIsKill(false);
BuildMI(MBB, MI, DL, get(NewOpc), HRI.getSubReg(DstReg, Hexagon::vsub_hi))
.add(MI.getOperand(1))
// The Vectors are indexed in multiples of vector size.
.addImm(MI.getOperand(2).getImm() + Offset)
- .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ .cloneMemRefs(MI);
MBB.erase(MI);
return true;
}
InstrBuilder.addImm(LPAC::makePostOp(AluOpcode));
// Transfer memory operands.
- InstrBuilder->setMemRefs(MemInstr->memoperands_begin(),
- MemInstr->memoperands_end());
+ InstrBuilder.setMemRefs(MemInstr->memoperands());
}
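
The hunk above uses the other half of the new interface: MachineInstrBuilder::setMemRefs now takes an ArrayRef<MachineMemOperand *>, and MachineInstr::memoperands() returns exactly such a range, so the call stays on the builder instead of dereferencing to the underlying MachineInstr as the removed InstrBuilder->setMemRefs(begin, end) form did. A short sketch, assuming InstrBuilder and MemInstr as in the code above:

// memoperands() yields an ArrayRef over MemInstr's memory operands, which the
// builder-level setMemRefs accepts directly.
ArrayRef<MachineMemOperand *> MMOs = MemInstr->memoperands();
InstrBuilder.setMemRefs(MMOs); // same effect as the one-line form above
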
// Function determines if ALU operation (in alu_iter) can be combined with
}
MIB.copyImplicitOps(*I);
-
- MIB.setMemRefs(I->memoperands_begin(), I->memoperands_end());
+ MIB.cloneMemRefs(*I);
return MIB;
}
const BasicBlock *BB = MBB->getBasicBlock();
MachineFunction::iterator I = ++MBB->getIterator();
- // Memory Reference
- MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
-
unsigned DstReg = MI.getOperand(0).getReg();
const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) {
setUsesTOCBasePtr(*MBB->getParent());
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
- .addReg(PPC::X2)
- .addImm(TOCOffset)
- .addReg(BufReg);
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ .addReg(PPC::X2)
+ .addImm(TOCOffset)
+ .addReg(BufReg)
+ .cloneMemRefs(MI);
}
// Naked functions never have a base pointer, and so we use r1. For all
TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
.addReg(BaseReg)
.addImm(BPOffset)
- .addReg(BufReg);
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ .addReg(BufReg)
+ .cloneMemRefs(MI);
// Setup
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
.addImm(LabelOffset)
.addReg(BufReg);
}
-
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.cloneMemRefs(MI);
BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
mainMBB->addSuccessor(sinkMBB);
MachineFunction *MF = MBB->getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
- // Memory Reference
- MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
-
MVT PVT = getPointerTy(MF->getDataLayout());
assert((PVT == MVT::i64 || PVT == MVT::i32) &&
"Invalid Pointer Size!");
.addImm(0)
.addReg(BufReg);
}
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.cloneMemRefs(MI);
// Reload IP
if (PVT == MVT::i64) {
.addImm(LabelOffset)
.addReg(BufReg);
}
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.cloneMemRefs(MI);
// Reload SP
if (PVT == MVT::i64) {
.addImm(SPOffset)
.addReg(BufReg);
}
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.cloneMemRefs(MI);
// Reload BP
if (PVT == MVT::i64) {
.addImm(BPOffset)
.addReg(BufReg);
}
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.cloneMemRefs(MI);
// Reload TOC
if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
setUsesTOCBasePtr(*MBB->getParent());
MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
- .addImm(TOCOffset)
- .addReg(BufReg);
-
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ .addImm(TOCOffset)
+ .addReg(BufReg)
+ .cloneMemRefs(MI);
}
// Jump
return false;
// Rebuild to get the CC operand in the right place.
- MachineInstr *BuiltMI =
- BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(Opcode));
+ auto MIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(Opcode));
for (const auto &MO : MI.operands())
- BuiltMI->addOperand(MO);
- BuiltMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.add(MO);
+ MIB.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
.addImm(ThisLength)
.add(SrcBase)
.addImm(SrcDisp)
- ->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ .setMemRefs(MI.memoperands());
DestDisp += ThisLength;
SrcDisp += ThisLength;
Length -= ThisLength;
for (unsigned i = 1; i < NumArgs; ++i)
MIB.add(MI->getOperand(i));
- MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ MIB.setMemRefs(MI->memoperands());
return MIB;
}
MIB.addReg(CondReg);
- MIB->setMemRefs(SetCCI.memoperands_begin(), SetCCI.memoperands_end());
+ MIB.setMemRefs(SetCCI.memoperands());
SetCCI.eraseFromParent();
return;
// Memory Reference
assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
- MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
+ SmallVector<MachineMemOperand *, 1> MMOs(MI.memoperands_begin(),
+ MI.memoperands_end());
// Machine Information
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
.add(Index)
.addDisp(Disp, UseFPOffset ? 4 : 0)
.add(Segment)
- .setMemRefs(MMOBegin, MMOEnd);
+ .setMemRefs(MMOs);
// Check if there is enough room left to pull this argument.
BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
.add(Index)
.addDisp(Disp, 16)
.add(Segment)
- .setMemRefs(MMOBegin, MMOEnd);
+ .setMemRefs(MMOs);
// Zero-extend the offset
unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
.addDisp(Disp, UseFPOffset ? 4 : 0)
.add(Segment)
.addReg(NextOffsetReg)
- .setMemRefs(MMOBegin, MMOEnd);
+ .setMemRefs(MMOs);
// Jump to endMBB
BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
.add(Index)
.addDisp(Disp, 8)
.add(Segment)
- .setMemRefs(MMOBegin, MMOEnd);
+ .setMemRefs(MMOs);
// If we need to align it, do so. Otherwise, just copy the address
// to OverflowDestReg.
.addDisp(Disp, 8)
.add(Segment)
.addReg(NextAddrReg)
- .setMemRefs(MMOBegin, MMOEnd);
+ .setMemRefs(MMOs);
// If we branched, emit the PHI to the front of endMBB.
if (offsetMBB) {
MachineInstrBuilder MIB;
// Memory Reference.
- MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
+ SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
+ MI.memoperands_end());
// Initialize a register with zero.
MVT PVT = getPointerTy(MF->getDataLayout());
MIB.add(MI.getOperand(MemOpndSlot + i));
}
MIB.addReg(SSPCopyReg);
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.setMemRefs(MMOs);
}
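
In the X86 lowering code the same memory-operand list is applied to several separately built instructions, so the removed mmo_iterator locals are replaced by a SmallVector<MachineMemOperand *, N> snapshot of MI's memory operands; SmallVector converts implicitly to ArrayRef, so the one snapshot can be handed to each setMemRefs call. A sketch of the pattern, with MIB1 and MIB2 as placeholder builders rather than names from this patch:

// Snapshot the memory operands once...
SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
                                         MI.memoperands_end());
// ...then reuse the same list for every instruction built from MI.
MIB1.setMemRefs(MMOs);
MIB2.setMemRefs(MMOs);
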
MachineBasicBlock *
MachineFunction::iterator I = ++MBB->getIterator();
// Memory Reference
- MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
+ SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
+ MI.memoperands_end());
unsigned DstReg;
unsigned MemOpndSlot = 0;
MIB.addReg(LabelReg);
else
MIB.addMBB(restoreMBB);
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.setMemRefs(MMOs);
if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
emitSetJmpShadowStackFix(MI, thisMBB);
MachineRegisterInfo &MRI = MF->getRegInfo();
// Memory Reference
- MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
+ SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
+ MI.memoperands_end());
MVT PVT = getPointerTy(MF->getDataLayout());
const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
else
MIB.add(MI.getOperand(i));
}
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.setMemRefs(MMOs);
// Subtract the current SSP from the previous SSP.
unsigned SspSubReg = MRI.createVirtualRegister(PtrRC);
MachineRegisterInfo &MRI = MF->getRegInfo();
// Memory Reference
- MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
+ SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
+ MI.memoperands_end());
MVT PVT = getPointerTy(MF->getDataLayout());
assert((PVT == MVT::i64 || PVT == MVT::i32) &&
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), FP);
for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
MIB.add(MI.getOperand(i));
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.setMemRefs(MMOs);
// Reload IP
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
else
MIB.add(MI.getOperand(i));
}
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.setMemRefs(MMOs);
// Reload SP
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), SP);
else
MIB.add(MI.getOperand(i));
}
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.setMemRefs(MMOs);
// Jump
BuildMI(*thisMBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
.addReg(SrcReg, getKillRegState(isKill));
}
-void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
- bool isKill,
- SmallVectorImpl<MachineOperand> &Addr,
- const TargetRegisterClass *RC,
- MachineInstr::mmo_iterator MMOBegin,
- MachineInstr::mmo_iterator MMOEnd,
- SmallVectorImpl<MachineInstr*> &NewMIs) const {
+void X86InstrInfo::storeRegToAddr(
+ MachineFunction &MF, unsigned SrcReg, bool isKill,
+ SmallVectorImpl<MachineOperand> &Addr, const TargetRegisterClass *RC,
+ ArrayRef<MachineMemOperand *> MMOs,
+ SmallVectorImpl<MachineInstr *> &NewMIs) const {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
- bool isAligned = MMOBegin != MMOEnd &&
- (*MMOBegin)->getAlignment() >= Alignment;
+ bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment;
unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget);
DebugLoc DL;
MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
MIB.add(Addr[i]);
MIB.addReg(SrcReg, getKillRegState(isKill));
- (*MIB).setMemRefs(MMOBegin, MMOEnd);
+ MIB.setMemRefs(MMOs);
NewMIs.push_back(MIB);
}
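
With storeRegToAddr (and loadRegFromAddr below) taking an ArrayRef<MachineMemOperand *>, the alignment probe no longer compares iterators: an empty range is MMOs.empty() and the first operand is MMOs.front(). A hedged sketch of a call site from inside X86InstrInfo, with Reg, AddrOps, and RC assumed to be set up by the caller; the real callers in this patch first narrow the list with extractStoreMMOs, as the unfolding code further below shows:

// An ArrayRef of memory operands replaces the old (MMOBegin, MMOEnd) pair.
// An empty range simply means no alignment information is available.
SmallVector<MachineInstr *, 2> NewMIs;
storeRegToAddr(MF, Reg, /*isKill=*/true, AddrOps, RC, MI.memoperands(), NewMIs);
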
addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DestReg), FrameIdx);
}
-void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
- SmallVectorImpl<MachineOperand> &Addr,
- const TargetRegisterClass *RC,
- MachineInstr::mmo_iterator MMOBegin,
- MachineInstr::mmo_iterator MMOEnd,
- SmallVectorImpl<MachineInstr*> &NewMIs) const {
+void X86InstrInfo::loadRegFromAddr(
+ MachineFunction &MF, unsigned DestReg,
+ SmallVectorImpl<MachineOperand> &Addr, const TargetRegisterClass *RC,
+ ArrayRef<MachineMemOperand *> MMOs,
+ SmallVectorImpl<MachineInstr *> &NewMIs) const {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
- bool isAligned = MMOBegin != MMOEnd &&
- (*MMOBegin)->getAlignment() >= Alignment;
+ bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment;
unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget);
DebugLoc DL;
MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
MIB.add(Addr[i]);
- (*MIB).setMemRefs(MMOBegin, MMOEnd);
+ MIB.setMemRefs(MMOs);
NewMIs.push_back(MIB);
}
// Emit the load instruction.
if (UnfoldLoad) {
- std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator> MMOs =
- MF.extractLoadMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs.first, MMOs.second, NewMIs);
+ auto MMOs = extractLoadMMOs(MI.memoperands(), MF);
+ loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs, NewMIs);
if (UnfoldStore) {
// Address operands cannot be marked isKill.
for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) {
// Emit the store instruction.
if (UnfoldStore) {
const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF);
- std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator> MMOs =
- MF.extractStoreMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- storeRegToAddr(MF, Reg, true, AddrOps, DstRC, MMOs.first, MMOs.second, NewMIs);
+ auto MMOs = extractStoreMMOs(MI.memoperands(), MF);
+ storeRegToAddr(MF, Reg, true, AddrOps, DstRC, MMOs, NewMIs);
}
return true;
void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
- MachineInstr::mmo_iterator MMOBegin,
- MachineInstr::mmo_iterator MMOEnd,
+ ArrayRef<MachineMemOperand *> MMOs,
SmallVectorImpl<MachineInstr *> &NewMIs) const;
void loadRegFromStackSlot(MachineBasicBlock &MBB,
void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
- MachineInstr::mmo_iterator MMOBegin,
- MachineInstr::mmo_iterator MMOEnd,
+ ArrayRef<MachineMemOperand *> MMOs,
SmallVectorImpl<MachineInstr *> &NewMIs) const;
bool expandPostRAPseudo(MachineInstr &MI) const override;