From: Bjorn Pettersson
Date: Fri, 19 Apr 2019 09:08:38 +0000 (+0000)
Subject: [CodeGen] Add "const" to MachineInstr::mayAlias
X-Git-Tag: llvmorg-10-init~7468
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=238c9d6308df84473aecbc993cd09a53c057fe0f;p=platform%2Fupstream%2Fllvm.git

[CodeGen] Add "const" to MachineInstr::mayAlias

Summary:
The basic idea here is to make it possible to use
MachineInstr::mayAlias also when the MachineInstr
is const (or the "Other" MachineInstr is const).

The addition of const in MachineInstr::mayAlias
then rippled down to the need for adding const
in several other places, such as
TargetInstrInfo::getMemOperandWithOffset.

Reviewers: hfinkel

Reviewed By: hfinkel

Subscribers: hfinkel, MatzeB, arsenm, jvesely, nhaehnle, hiraditya, javed.absar, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D60856

llvm-svn: 358744
---

diff --git a/llvm/include/llvm/CodeGen/MachineInstr.h b/llvm/include/llvm/CodeGen/MachineInstr.h
index a85fee7..5f671de 100644
--- a/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -1378,7 +1378,7 @@ public:
   /// @param AA Optional alias analysis, used to compare memory operands.
   /// @param Other MachineInstr to check aliasing against.
   /// @param UseTBAA Whether to pass TBAA information to alias analysis.
-  bool mayAlias(AliasAnalysis *AA, MachineInstr &Other, bool UseTBAA);
+  bool mayAlias(AliasAnalysis *AA, const MachineInstr &Other, bool UseTBAA) const;
 
   /// Return true if this instruction may have an ordered
   /// or volatile memory reference, or if the information describing the memory
diff --git a/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
index b732be6..3ba4d40 100644
--- a/llvm/include/llvm/CodeGen/TargetInstrInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
@@ -1144,8 +1144,9 @@ public:
 
   /// Get the base operand and byte offset of an instruction that reads/writes
   /// memory.
-  virtual bool getMemOperandWithOffset(MachineInstr &MI,
-                                       MachineOperand *&BaseOp, int64_t &Offset,
+  virtual bool getMemOperandWithOffset(const MachineInstr &MI,
+                                       const MachineOperand *&BaseOp,
+                                       int64_t &Offset,
                                        const TargetRegisterInfo *TRI) const {
     return false;
   }
@@ -1170,8 +1171,8 @@ public:
   /// or
   ///   DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
   /// to TargetPassConfig::createMachineScheduler() to have an effect.
-  virtual bool shouldClusterMemOps(MachineOperand &BaseOp1,
-                                   MachineOperand &BaseOp2,
+  virtual bool shouldClusterMemOps(const MachineOperand &BaseOp1,
+                                   const MachineOperand &BaseOp2,
                                    unsigned NumLoads) const {
     llvm_unreachable("target did not implement shouldClusterMemOps()");
   }
@@ -1548,7 +1549,8 @@ public:
   /// See also MachineInstr::mayAlias, which is implemented on top of this
   /// function.
   virtual bool
-  areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
+  areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
+                                  const MachineInstr &MIb,
                                   AliasAnalysis *AA = nullptr) const {
     assert((MIa.mayLoad() || MIa.mayStore()) &&
            "MIa must load from or modify a memory location");
diff --git a/llvm/lib/CodeGen/ImplicitNullChecks.cpp b/llvm/lib/CodeGen/ImplicitNullChecks.cpp
index 6811031..dff6d02 100644
--- a/llvm/lib/CodeGen/ImplicitNullChecks.cpp
+++ b/llvm/lib/CodeGen/ImplicitNullChecks.cpp
@@ -180,7 +180,8 @@ class ImplicitNullChecks : public MachineFunctionPass {
   /// Returns AR_NoAlias if \p MI memory operation does not alias with
   /// \p PrevMI, AR_MayAlias if they may alias and AR_WillAliasEverything if
   /// they may alias and any further memory operation may alias with \p PrevMI.
-  AliasResult areMemoryOpsAliased(MachineInstr &MI, MachineInstr *PrevMI);
+  AliasResult areMemoryOpsAliased(const MachineInstr &MI,
+                                  const MachineInstr *PrevMI) const;
 
   enum SuitabilityResult {
     SR_Suitable,
@@ -194,7 +195,8 @@ class ImplicitNullChecks : public MachineFunctionPass {
   /// no sense to continue lookup due to any other instruction will not be able
   /// to be used. \p PrevInsts is the set of instruction seen since
   /// the explicit null check on \p PointerReg.
-  SuitabilityResult isSuitableMemoryOp(MachineInstr &MI, unsigned PointerReg,
+  SuitabilityResult isSuitableMemoryOp(const MachineInstr &MI,
+                                       unsigned PointerReg,
                                        ArrayRef<MachineInstr *> PrevInsts);
 
   /// Return true if \p FaultingMI can be hoisted from after the
@@ -318,8 +320,8 @@ static bool AnyAliasLiveIn(const TargetRegisterInfo *TRI,
 }
 
 ImplicitNullChecks::AliasResult
-ImplicitNullChecks::areMemoryOpsAliased(MachineInstr &MI,
-                                        MachineInstr *PrevMI) {
+ImplicitNullChecks::areMemoryOpsAliased(const MachineInstr &MI,
+                                        const MachineInstr *PrevMI) const {
   // If it is not memory access, skip the check.
   if (!(PrevMI->mayStore() || PrevMI->mayLoad()))
     return AR_NoAlias;
@@ -356,10 +358,11 @@ ImplicitNullChecks::areMemoryOpsAliased(MachineInstr &MI,
 }
 
 ImplicitNullChecks::SuitabilityResult
-ImplicitNullChecks::isSuitableMemoryOp(MachineInstr &MI, unsigned PointerReg,
+ImplicitNullChecks::isSuitableMemoryOp(const MachineInstr &MI,
+                                       unsigned PointerReg,
                                        ArrayRef<MachineInstr *> PrevInsts) {
   int64_t Offset;
-  MachineOperand *BaseOp;
+  const MachineOperand *BaseOp;
 
   if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, TRI) ||
       !BaseOp->isReg() || BaseOp->getReg() != PointerReg)
diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp
index 26d5834..7fdcb31 100644
--- a/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/llvm/lib/CodeGen/MachineInstr.cpp
@@ -1181,8 +1181,8 @@ bool MachineInstr::isSafeToMove(AliasAnalysis *AA, bool &SawStore) const {
   return true;
 }
 
-bool MachineInstr::mayAlias(AliasAnalysis *AA, MachineInstr &Other,
-                            bool UseTBAA) {
+bool MachineInstr::mayAlias(AliasAnalysis *AA, const MachineInstr &Other,
+                            bool UseTBAA) const {
   const MachineFunction *MF = getMF();
   const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
   const MachineFrameInfo &MFI = MF->getFrameInfo();
diff --git a/llvm/lib/CodeGen/MachinePipeliner.cpp b/llvm/lib/CodeGen/MachinePipeliner.cpp
index 1f7c48f..fde6531 100644
--- a/llvm/lib/CodeGen/MachinePipeliner.cpp
+++ b/llvm/lib/CodeGen/MachinePipeliner.cpp
@@ -597,7 +597,7 @@ void SwingSchedulerDAG::addLoopCarriedDependences(AliasAnalysis *AA) {
       // First, perform the cheaper check that compares the base register.
      // If they are the same and the load offset is less than the store
      // offset, then mark the dependence as loop carried potentially.
-      MachineOperand *BaseOp1, *BaseOp2;
+      const MachineOperand *BaseOp1, *BaseOp2;
       int64_t Offset1, Offset2;
       if (TII->getMemOperandWithOffset(LdMI, BaseOp1, Offset1, TRI) &&
           TII->getMemOperandWithOffset(MI, BaseOp2, Offset2, TRI)) {
@@ -2725,7 +2725,7 @@ void SwingSchedulerDAG::addBranches(MBBVectorTy &PrologBBs,
 /// during each iteration. Set Delta to the amount of the change.
 bool SwingSchedulerDAG::computeDelta(MachineInstr &MI, unsigned &Delta) {
   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
-  MachineOperand *BaseOp;
+  const MachineOperand *BaseOp;
   int64_t Offset;
   if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, TRI))
     return false;
@@ -3139,7 +3139,7 @@ bool SwingSchedulerDAG::isLoopCarriedDep(SUnit *Source, const SDep &Dep,
   if (!computeDelta(*SI, DeltaS) || !computeDelta(*DI, DeltaD))
     return true;
 
-  MachineOperand *BaseOpS, *BaseOpD;
+  const MachineOperand *BaseOpS, *BaseOpD;
   int64_t OffsetS, OffsetD;
   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
   if (!TII->getMemOperandWithOffset(*SI, BaseOpS, OffsetS, TRI) ||
diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp
index d56ea43..4b61dad 100644
--- a/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -1466,10 +1466,10 @@ namespace {
 class BaseMemOpClusterMutation : public ScheduleDAGMutation {
   struct MemOpInfo {
     SUnit *SU;
-    MachineOperand *BaseOp;
+    const MachineOperand *BaseOp;
     int64_t Offset;
 
-    MemOpInfo(SUnit *su, MachineOperand *Op, int64_t ofs)
+    MemOpInfo(SUnit *su, const MachineOperand *Op, int64_t ofs)
         : SU(su), BaseOp(Op), Offset(ofs) {}
 
     bool operator<(const MemOpInfo &RHS) const {
@@ -1555,7 +1555,7 @@ void BaseMemOpClusterMutation::clusterNeighboringMemOps(
     ArrayRef<SUnit *> MemOps, ScheduleDAGInstrs *DAG) {
   SmallVector<MemOpInfo, 32> MemOpRecords;
   for (SUnit *SU : MemOps) {
-    MachineOperand *BaseOp;
+    const MachineOperand *BaseOp;
     int64_t Offset;
     if (TII->getMemOperandWithOffset(*SU->getInstr(), BaseOp, Offset, TRI))
       MemOpRecords.push_back(MemOpInfo(SU, BaseOp, Offset));
diff --git a/llvm/lib/CodeGen/MachineSink.cpp b/llvm/lib/CodeGen/MachineSink.cpp
index f4fcbeb..fc61a34 100644
--- a/llvm/lib/CodeGen/MachineSink.cpp
+++ b/llvm/lib/CodeGen/MachineSink.cpp
@@ -715,7 +715,7 @@ static bool SinkingPreventsImplicitNullCheck(MachineInstr &MI,
       !PredBB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit))
     return false;
 
-  MachineOperand *BaseOp;
+  const MachineOperand *BaseOp;
   int64_t Offset;
   if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, TRI))
     return false;
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 12f2576..2819c50 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -927,9 +927,9 @@ bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
 }
 
 bool AArch64InstrInfo::areMemAccessesTriviallyDisjoint(
-    MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA) const {
+    const MachineInstr &MIa, const MachineInstr &MIb, AliasAnalysis *AA) const {
   const TargetRegisterInfo *TRI = &getRegisterInfo();
-  MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
+  const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
   int64_t OffsetA = 0, OffsetB = 0;
   unsigned WidthA = 0, WidthB = 0;
 
@@ -1894,7 +1894,7 @@ unsigned AArch64InstrInfo::convertToFlagSettingOpc(unsigned Opc,
 
 // Is this a candidate for ld/st merging or pairing? For example, we don't
 // touch volatiles or load/stores that have a hint to avoid pair formation.
-bool AArch64InstrInfo::isCandidateToMergeOrPair(MachineInstr &MI) const {
+bool AArch64InstrInfo::isCandidateToMergeOrPair(const MachineInstr &MI) const {
   // If this is a volatile load/store, don't mess with it.
   if (MI.hasOrderedMemoryRef())
     return false;
@@ -1936,8 +1936,8 @@ bool AArch64InstrInfo::isCandidateToMergeOrPair(MachineInstr &MI) const {
   return true;
 }
 
-bool AArch64InstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
-                                               MachineOperand *&BaseOp,
+bool AArch64InstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
+                                               const MachineOperand *&BaseOp,
                                                int64_t &Offset,
                                                const TargetRegisterInfo *TRI) const {
   unsigned Width;
@@ -1945,7 +1945,7 @@ bool AArch64InstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
 }
 
 bool AArch64InstrInfo::getMemOperandWithOffsetWidth(
-    MachineInstr &LdSt, MachineOperand *&BaseOp, int64_t &Offset,
+    const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset,
     unsigned &Width, const TargetRegisterInfo *TRI) const {
   assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
   // Handle only loads/stores with base register followed by immediate offset.
@@ -2244,11 +2244,11 @@ static bool shouldClusterFI(const MachineFrameInfo &MFI, int FI1,
 /// Detect opportunities for ldp/stp formation.
 ///
 /// Only called for LdSt for which getMemOperandWithOffset returns true.
-bool AArch64InstrInfo::shouldClusterMemOps(MachineOperand &BaseOp1,
-                                           MachineOperand &BaseOp2,
+bool AArch64InstrInfo::shouldClusterMemOps(const MachineOperand &BaseOp1,
+                                           const MachineOperand &BaseOp2,
                                            unsigned NumLoads) const {
-  MachineInstr &FirstLdSt = *BaseOp1.getParent();
-  MachineInstr &SecondLdSt = *BaseOp2.getParent();
+  const MachineInstr &FirstLdSt = *BaseOp1.getParent();
+  const MachineInstr &SecondLdSt = *BaseOp2.getParent();
   if (BaseOp1.getType() != BaseOp2.getType())
     return false;
@@ -4918,8 +4918,8 @@ AArch64InstrInfo::getOutliningCandidateInfo(
       // At this point, we have a stack instruction that we might need to
       // fix up. We'll handle it if it's a load or store.
       if (MI.mayLoadOrStore()) {
-        MachineOperand *Base; // Filled with the base operand of MI.
-        int64_t Offset;       // Filled with the offset of MI.
+        const MachineOperand *Base; // Filled with the base operand of MI.
+        int64_t Offset;             // Filled with the offset of MI.
 
         // Does it allow us to offset the base operand and is the base the
         // register SP?
@@ -5288,7 +5288,7 @@ AArch64InstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT,
 
 void AArch64InstrInfo::fixupPostOutline(MachineBasicBlock &MBB) const {
   for (MachineInstr &MI : MBB) {
-    MachineOperand *Base;
+    const MachineOperand *Base;
     unsigned Width;
     int64_t Offset;
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index 148b7af..b33edc5 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -54,7 +54,8 @@ public:
                                unsigned &DstReg, unsigned &SubIdx) const override;
 
   bool
-  areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
+  areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
+                                  const MachineInstr &MIb,
                                   AliasAnalysis *AA = nullptr) const override;
 
   unsigned isLoadFromStackSlot(const MachineInstr &MI,
@@ -100,16 +101,18 @@ public:
   static unsigned convertToFlagSettingOpc(unsigned Opc, bool &Is64Bit);
 
   /// Return true if this is a load/store that can be potentially paired/merged.
-  bool isCandidateToMergeOrPair(MachineInstr &MI) const;
+  bool isCandidateToMergeOrPair(const MachineInstr &MI) const;
 
   /// Hint that pairing the given load or store is unprofitable.
   static void suppressLdStPair(MachineInstr &MI);
 
-  bool getMemOperandWithOffset(MachineInstr &MI, MachineOperand *&BaseOp,
+  bool getMemOperandWithOffset(const MachineInstr &MI,
+                               const MachineOperand *&BaseOp,
                                int64_t &Offset,
                                const TargetRegisterInfo *TRI) const override;
 
-  bool getMemOperandWithOffsetWidth(MachineInstr &MI, MachineOperand *&BaseOp,
+  bool getMemOperandWithOffsetWidth(const MachineInstr &MI,
+                                    const MachineOperand *&BaseOp,
                                     int64_t &Offset, unsigned &Width,
                                     const TargetRegisterInfo *TRI) const;
 
@@ -123,7 +126,8 @@ public:
   static bool getMemOpInfo(unsigned Opcode, unsigned &Scale, unsigned &Width,
                            int64_t &MinOffset, int64_t &MaxOffset);
 
-  bool shouldClusterMemOps(MachineOperand &BaseOp1, MachineOperand &BaseOp2,
+  bool shouldClusterMemOps(const MachineOperand &BaseOp1,
+                           const MachineOperand &BaseOp2,
                            unsigned NumLoads) const override;
 
   void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
diff --git a/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp b/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp
index 27ddd7a..0e84a00 100644
--- a/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp
+++ b/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp
@@ -147,7 +147,7 @@ bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
     for (auto &MI : MBB) {
       if (!isNarrowFPStore(MI))
         continue;
-      MachineOperand *BaseOp;
+      const MachineOperand *BaseOp;
       int64_t Offset;
       if (TII->getMemOperandWithOffset(MI, BaseOp, Offset, TRI) &&
           BaseOp->isReg()) {
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 45b0a5b..8ce3fd2 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -256,8 +256,8 @@ static bool isStride64(unsigned Opc) {
   }
 }
 
-bool SIInstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
-                                          MachineOperand *&BaseOp,
+bool SIInstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
+                                          const MachineOperand *&BaseOp,
                                           int64_t &Offset,
                                           const TargetRegisterInfo *TRI) const {
   unsigned Opc = LdSt.getOpcode();
@@ -321,7 +321,7 @@ bool SIInstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
     if (SOffset && SOffset->isReg())
       return false;
 
-    MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
+    const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
     if (!AddrReg)
       return false;
@@ -344,7 +344,7 @@ bool SIInstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
     if (!OffsetImm)
       return false;
 
-    MachineOperand *SBaseReg = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
+    const MachineOperand *SBaseReg = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
     BaseOp = SBaseReg;
     Offset = OffsetImm->getImm();
     assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
@@ -353,7 +353,7 @@ bool SIInstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
   }
 
   if (isFLAT(LdSt)) {
-    MachineOperand *VAddr = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
+    const MachineOperand *VAddr = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
     if (VAddr) {
       // Can't analyze 2 offsets.
       if (getNamedOperand(LdSt, AMDGPU::OpName::saddr))
@@ -409,11 +409,11 @@ static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
   return Base1 == Base2;
 }
 
-bool SIInstrInfo::shouldClusterMemOps(MachineOperand &BaseOp1,
-                                      MachineOperand &BaseOp2,
+bool SIInstrInfo::shouldClusterMemOps(const MachineOperand &BaseOp1,
+                                      const MachineOperand &BaseOp2,
                                       unsigned NumLoads) const {
-  MachineInstr &FirstLdSt = *BaseOp1.getParent();
-  MachineInstr &SecondLdSt = *BaseOp2.getParent();
+  const MachineInstr &FirstLdSt = *BaseOp1.getParent();
+  const MachineInstr &SecondLdSt = *BaseOp2.getParent();
   if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOp1, SecondLdSt, BaseOp2))
     return false;
@@ -2223,9 +2223,9 @@ static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
   return LowOffset + LowWidth <= HighOffset;
 }
 
-bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr &MIa,
-                                               MachineInstr &MIb) const {
-  MachineOperand *BaseOp0, *BaseOp1;
+bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa,
+                                               const MachineInstr &MIb) const {
+  const MachineOperand *BaseOp0, *BaseOp1;
   int64_t Offset0, Offset1;
 
   if (getMemOperandWithOffset(MIa, BaseOp0, Offset0, &RI) &&
@@ -2247,8 +2247,8 @@ bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr &MIa,
   return false;
 }
 
-bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr &MIa,
-                                                  MachineInstr &MIb,
+bool SIInstrInfo::areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
+                                                  const MachineInstr &MIb,
                                                   AliasAnalysis *AA) const {
   assert((MIa.mayLoad() || MIa.mayStore()) &&
          "MIa must load from or modify a memory location");
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 13e3dbd..a22f5a6 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -127,7 +127,8 @@ private:
   const TargetRegisterClass *
   getDestEquivalentVGPRClass(const MachineInstr &Inst) const;
 
-  bool checkInstOffsetsDoNotOverlap(MachineInstr &MIa, MachineInstr &MIb) const;
+  bool checkInstOffsetsDoNotOverlap(const MachineInstr &MIa,
+                                    const MachineInstr &MIb) const;
 
   unsigned findUsedSGPR(const MachineInstr &MI, int OpIndices[3]) const;
 
@@ -172,11 +173,13 @@ public:
                                int64_t &Offset1,
                                int64_t &Offset2) const override;
 
-  bool getMemOperandWithOffset(MachineInstr &LdSt, MachineOperand *&BaseOp,
+  bool getMemOperandWithOffset(const MachineInstr &LdSt,
+                               const MachineOperand *&BaseOp,
                                int64_t &Offset,
                                const TargetRegisterInfo *TRI) const final;
 
-  bool shouldClusterMemOps(MachineOperand &BaseOp1, MachineOperand &BaseOp2,
+  bool shouldClusterMemOps(const MachineOperand &BaseOp1,
+                           const MachineOperand &BaseOp2,
                            unsigned NumLoads) const override;
 
   bool shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1, int64_t Offset0,
@@ -293,7 +296,8 @@ public:
                            unsigned Kind) const override;
 
   bool
-  areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
+  areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
+                                  const MachineInstr &MIb,
                                   AliasAnalysis *AA = nullptr) const override;
 
   bool isFoldableCopy(const MachineInstr &MI) const;
diff --git a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
index 343c8b6..ebbdf80 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
@@ -1956,7 +1956,7 @@ void SIScheduleDAGMI::schedule()
   for (unsigned i = 0, e = (unsigned)SUnits.size(); i != e; ++i) {
     SUnit *SU = &SUnits[i];
 
-    MachineOperand *BaseLatOp;
+    const MachineOperand *BaseLatOp;
     int64_t OffLatReg;
     if (SITII->isLowLatencyInstruction(*SU->getInstr())) {
       IsLowLatencySU[i] = 1;
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index 3ecb142..545dd15 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -1854,7 +1854,8 @@ DFAPacketizer *HexagonInstrInfo::CreateTargetScheduleState(
 //   S2_storeri_io %r29, 132, killed %r1; flags: mem:ST4[FixedStack1]
 // Currently AA considers the addresses in these instructions to be aliasing.
 bool HexagonInstrInfo::areMemAccessesTriviallyDisjoint(
-    MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA) const {
+    const MachineInstr &MIa, const MachineInstr &MIb,
+    AliasAnalysis *AA) const {
   if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
       MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
     return false;
@@ -2925,7 +2926,7 @@ bool HexagonInstrInfo::addLatencyToSchedule(const MachineInstr &MI1,
 
 /// Get the base register and byte offset of a load/store instr.
 bool HexagonInstrInfo::getMemOperandWithOffset(
-    MachineInstr &LdSt, MachineOperand *&BaseOp, int64_t &Offset,
+    const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset,
     const TargetRegisterInfo *TRI) const {
   unsigned AccessSize = 0;
   BaseOp = getBaseAndOffset(LdSt, Offset, AccessSize);
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.h b/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
index 688af2a..8d33964 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -215,7 +215,8 @@ public:
   bool expandPostRAPseudo(MachineInstr &MI) const override;
 
   /// Get the base register and byte offset of a load/store instr.
-  bool getMemOperandWithOffset(MachineInstr &LdSt, MachineOperand *&BaseOp,
+  bool getMemOperandWithOffset(const MachineInstr &LdSt,
+                               const MachineOperand *&BaseOp,
                                int64_t &Offset,
                                const TargetRegisterInfo *TRI) const override;
 
@@ -295,7 +296,8 @@ public:
   // memory addresses. This function returns true if two MIs access different
   // memory addresses and false otherwise.
   bool
-  areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
+  areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
+                                  const MachineInstr &MIb,
                                   AliasAnalysis *AA = nullptr) const override;
 
   /// For instructions with a base and offset, return the position of the
diff --git a/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp b/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
index 93db67c..dd45797 100644
--- a/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
+++ b/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
@@ -86,7 +86,8 @@ void LanaiInstrInfo::loadRegFromStackSlot(
 }
 
 bool LanaiInstrInfo::areMemAccessesTriviallyDisjoint(
-    MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis * /*AA*/) const {
+    const MachineInstr &MIa, const MachineInstr &MIb,
+    AliasAnalysis * /*AA*/) const {
   assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
   assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
 
@@ -100,7 +101,7 @@ bool LanaiInstrInfo::areMemAccessesTriviallyDisjoint(
   // the width doesn't overlap the offset of a higher memory access,
   // then the memory accesses are different.
   const TargetRegisterInfo *TRI = &getRegisterInfo();
-  MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
+  const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
   int64_t OffsetA = 0, OffsetB = 0;
   unsigned int WidthA = 0, WidthB = 0;
   if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
@@ -755,7 +756,7 @@ unsigned LanaiInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
 }
 
 bool LanaiInstrInfo::getMemOperandWithOffsetWidth(
-    MachineInstr &LdSt, MachineOperand *&BaseOp, int64_t &Offset,
+    const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset,
     unsigned &Width, const TargetRegisterInfo * /*TRI*/) const {
   // Handle only loads/stores with base register followed by immediate offset
   // and with add as ALU op.
@@ -793,8 +794,8 @@ bool LanaiInstrInfo::getMemOperandWithOffsetWidth(
   return true;
 }
 
-bool LanaiInstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
-                                             MachineOperand *&BaseOp,
+bool LanaiInstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
+                                             const MachineOperand *&BaseOp,
                                              int64_t &Offset,
                                              const TargetRegisterInfo *TRI) const {
   switch (LdSt.getOpcode()) {
diff --git a/llvm/lib/Target/Lanai/LanaiInstrInfo.h b/llvm/lib/Target/Lanai/LanaiInstrInfo.h
index feda529..d71424a 100644
--- a/llvm/lib/Target/Lanai/LanaiInstrInfo.h
+++ b/llvm/lib/Target/Lanai/LanaiInstrInfo.h
@@ -35,7 +35,8 @@ public:
     return RegisterInfo;
   }
 
-  bool areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
+  bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
+                                       const MachineInstr &MIb,
                                        AliasAnalysis *AA) const override;
 
   unsigned isLoadFromStackSlot(const MachineInstr &MI,
@@ -67,11 +68,13 @@ public:
 
   bool expandPostRAPseudo(MachineInstr &MI) const override;
 
-  bool getMemOperandWithOffset(MachineInstr &LdSt, MachineOperand *&BaseOp,
+  bool getMemOperandWithOffset(const MachineInstr &LdSt,
+                               const MachineOperand *&BaseOp,
                                int64_t &Offset,
                                const TargetRegisterInfo *TRI) const override;
 
-  bool getMemOperandWithOffsetWidth(MachineInstr &LdSt, MachineOperand *&BaseOp,
+  bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt,
+                                    const MachineOperand *&BaseOp,
                                     int64_t &Offset, unsigned &Width,
                                     const TargetRegisterInfo *TRI) const;
 
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index cb963a1..54977d9 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -1782,7 +1782,8 @@ void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
 }
 
 bool SystemZInstrInfo::
-areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
+areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
+                                const MachineInstr &MIb,
                                 AliasAnalysis *AA) const {
 
   if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand())
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
index c5d1917..150028b 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -313,7 +313,8 @@ public:
   // addresses. This function returns true if two MIs access different
   // memory addresses and false otherwise.
   bool
-  areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
+  areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
+                                  const MachineInstr &MIb,
                                   AliasAnalysis *AA = nullptr) const override;
 };
 
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 7b68cc2..480292b 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -2958,7 +2958,7 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg,
 }
 
 bool X86InstrInfo::getMemOperandWithOffset(
-    MachineInstr &MemOp, MachineOperand *&BaseOp, int64_t &Offset,
+    const MachineInstr &MemOp, const MachineOperand *&BaseOp, int64_t &Offset,
     const TargetRegisterInfo *TRI) const {
   const MCInstrDesc &Desc = MemOp.getDesc();
   int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index 49f7660..de4ed69 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -288,7 +288,8 @@ public:
                      SmallVectorImpl<MachineOperand> &Cond,
                      bool AllowModify) const override;
 
-  bool getMemOperandWithOffset(MachineInstr &LdSt, MachineOperand *&BaseOp,
+  bool getMemOperandWithOffset(const MachineInstr &LdSt,
+                               const MachineOperand *&BaseOp,
                                int64_t &Offset,
                                const TargetRegisterInfo *TRI) const override;
 
   bool analyzeBranchPredicate(MachineBasicBlock &MBB,
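
Usage sketch (illustrative, not part of the commit): the point of the const-qualification is that read-only analyses can now query aliasing through const handles. The helper name mayOverlap below is hypothetical, not an LLVM API; the only interface it relies on is the mayAlias signature introduced by this patch.

// Hypothetical helper: with this patch applied, mayAlias can be invoked
// on a const MachineInstr and given a const "Other" instruction. Before
// the patch, both references would have had to be non-const.
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineInstr.h"

using namespace llvm;

static bool mayOverlap(AliasAnalysis *AA, const MachineInstr &MIa,
                       const MachineInstr &MIb) {
  // Instructions that touch no memory trivially cannot alias.
  if (!MIa.mayLoadOrStore() || !MIb.mayLoadOrStore())
    return false;
  // Legal now that mayAlias is const and takes const MachineInstr &Other.
  return MIa.mayAlias(AA, MIb, /*UseTBAA=*/true);
}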