From 038ede2a16a55106ada3970265037ba0327385bb Mon Sep 17 00:00:00 2001
From: Renato Golin
Date: Fri, 9 Mar 2018 21:05:58 +0000
Subject: [PATCH] [NFC] Consolidate six getPointerOperand() utility functions
 into one place

There are six separate instances of the getPointerOperand() utility.
LoopVectorize.cpp has one of them, and I don't want to create a seventh while
moving LoopVectorizationLegality into a separate file (the eventual objective
is to move it to the Analysis tree).

See http://lists.llvm.org/pipermail/llvm-dev/2018-February/120999.html for
the llvm-dev discussion.

Closes D43323.

Patch by Hideki Saito.

llvm-svn: 327173
---
 llvm/include/llvm/IR/Instructions.h              | 20 +++++++++
 llvm/lib/Analysis/Delinearization.cpp            | 12 +----
 llvm/lib/Analysis/DependenceAnalysis.cpp         | 27 +++++------
 llvm/lib/Analysis/LoopAccessAnalysis.cpp         | 14 +-----
 llvm/lib/Transforms/Scalar/EarlyCSE.cpp          |  7 +--
 .../Transforms/Vectorize/LoadStoreVectorizer.cpp | 22 +++------
 llvm/lib/Transforms/Vectorize/LoopVectorize.cpp  | 52 +++++++++-------------
 7 files changed, 62 insertions(+), 92 deletions(-)

diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
index 66e0261..9a39ccc 100644
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -5043,6 +5043,26 @@ public:
   }
 };
 
+/// A helper function that returns the pointer operand of a load or store
+/// instruction. Returns nullptr if not load or store.
+inline Value *getLoadStorePointerOperand(Value *V) {
+  if (auto *Load = dyn_cast<LoadInst>(V))
+    return Load->getPointerOperand();
+  if (auto *Store = dyn_cast<StoreInst>(V))
+    return Store->getPointerOperand();
+  return nullptr;
+}
+
+/// A helper function that returns the pointer operand of a load, store
+/// or GEP instruction. Returns nullptr if not load, store, or GEP.
+inline Value *getPointerOperand(Value *V) {
+  if (auto *Ptr = getLoadStorePointerOperand(V))
+    return Ptr;
+  if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
+    return Gep->getPointerOperand();
+  return nullptr;
+}
+
 } // end namespace llvm
 
 #endif // LLVM_IR_INSTRUCTIONS_H
diff --git a/llvm/lib/Analysis/Delinearization.cpp b/llvm/lib/Analysis/Delinearization.cpp
index dd5af9d..4cafb7d 100644
--- a/llvm/lib/Analysis/Delinearization.cpp
+++ b/llvm/lib/Analysis/Delinearization.cpp
@@ -69,16 +69,6 @@ bool Delinearization::runOnFunction(Function &F) {
   return false;
 }
 
-static Value *getPointerOperand(Instruction &Inst) {
-  if (LoadInst *Load = dyn_cast<LoadInst>(&Inst))
-    return Load->getPointerOperand();
-  else if (StoreInst *Store = dyn_cast<StoreInst>(&Inst))
-    return Store->getPointerOperand();
-  else if (GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(&Inst))
-    return Gep->getPointerOperand();
-  return nullptr;
-}
-
 void Delinearization::print(raw_ostream &O, const Module *) const {
   O << "Delinearization on function " << F->getName() << ":\n";
   for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
@@ -93,7 +83,7 @@ void Delinearization::print(raw_ostream &O, const Module *) const {
     // Delinearize the memory access as analyzed in all the surrounding loops.
    // Do not analyze memory accesses outside loops.
     for (Loop *L = LI->getLoopFor(BB); L != nullptr; L = L->getParentLoop()) {
-      const SCEV *AccessFn = SE->getSCEVAtScope(getPointerOperand(*Inst), L);
+      const SCEV *AccessFn = SE->getSCEVAtScope(getPointerOperand(Inst), L);
 
       const SCEVUnknown *BasePointer =
           dyn_cast<SCEVUnknown>(SE->getPointerBase(AccessFn));
diff --git a/llvm/lib/Analysis/DependenceAnalysis.cpp b/llvm/lib/Analysis/DependenceAnalysis.cpp
index 0d89bbd..b63ec27 100644
--- a/llvm/lib/Analysis/DependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/DependenceAnalysis.cpp
@@ -643,17 +643,6 @@ bool isLoadOrStore(const Instruction *I) {
 }
 
 
-static
-Value *getPointerOperand(Instruction *I) {
-  if (LoadInst *LI = dyn_cast<LoadInst>(I))
-    return LI->getPointerOperand();
-  if (StoreInst *SI = dyn_cast<StoreInst>(I))
-    return SI->getPointerOperand();
-  llvm_unreachable("Value is not load or store instruction");
-  return nullptr;
-}
-
-
 // Examines the loop nesting of the Src and Dst
 // instructions and establishes their shared loops. Sets the variables
 // CommonLevels, SrcLevels, and MaxLevels.
@@ -3176,8 +3165,10 @@ void DependenceInfo::updateDirection(Dependence::DVEntry &Level,
 /// for each loop level.
 bool DependenceInfo::tryDelinearize(Instruction *Src, Instruction *Dst,
                                     SmallVectorImpl<Subscript> &Pair) {
-  Value *SrcPtr = getPointerOperand(Src);
-  Value *DstPtr = getPointerOperand(Dst);
+  assert(isLoadOrStore(Src) && "instruction is not load or store");
+  assert(isLoadOrStore(Dst) && "instruction is not load or store");
+  Value *SrcPtr = getLoadStorePointerOperand(Src);
+  Value *DstPtr = getLoadStorePointerOperand(Dst);
   Loop *SrcLoop = LI->getLoopFor(Src->getParent());
   Loop *DstLoop = LI->getLoopFor(Dst->getParent());
 
@@ -3302,8 +3293,10 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst,
     return make_unique<Dependence>(Src, Dst);
   }
 
-  Value *SrcPtr = getPointerOperand(Src);
-  Value *DstPtr = getPointerOperand(Dst);
+  assert(isLoadOrStore(Src) && "instruction is not load or store");
+  assert(isLoadOrStore(Dst) && "instruction is not load or store");
+  Value *SrcPtr = getLoadStorePointerOperand(Src);
+  Value *DstPtr = getLoadStorePointerOperand(Dst);
 
   switch (underlyingObjectsAlias(AA, F->getParent()->getDataLayout(),
                                  DstPtr, SrcPtr)) {
@@ -3720,8 +3713,8 @@ const SCEV *DependenceInfo::getSplitIteration(const Dependence &Dep,
   assert(Dst->mayReadFromMemory() || Dst->mayWriteToMemory());
   assert(isLoadOrStore(Src));
   assert(isLoadOrStore(Dst));
-  Value *SrcPtr = getPointerOperand(Src);
-  Value *DstPtr = getPointerOperand(Dst);
+  Value *SrcPtr = getLoadStorePointerOperand(Src);
+  Value *DstPtr = getLoadStorePointerOperand(Dst);
   assert(underlyingObjectsAlias(AA, F->getParent()->getDataLayout(),
                                 DstPtr, SrcPtr) == MustAlias);
 
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index beef700..9836637 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -1087,16 +1087,6 @@ int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr,
   return Stride;
 }
 
-/// Take the pointer operand from the Load/Store instruction.
-/// Returns NULL if this is not a valid Load/Store instruction.
-static Value *getPointerOperand(Value *I) {
-  if (auto *LI = dyn_cast<LoadInst>(I))
-    return LI->getPointerOperand();
-  if (auto *SI = dyn_cast<StoreInst>(I))
-    return SI->getPointerOperand();
-  return nullptr;
-}
-
 /// Take the address space operand from the Load/Store instruction.
 /// Returns -1 if this is not a valid Load/Store instruction.
 static unsigned getAddressSpaceOperand(Value *I) {
@@ -1110,8 +1100,8 @@ static unsigned getAddressSpaceOperand(Value *I) {
 /// Returns true if the memory operations \p A and \p B are consecutive.
 bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                                ScalarEvolution &SE, bool CheckType) {
-  Value *PtrA = getPointerOperand(A);
-  Value *PtrB = getPointerOperand(B);
+  Value *PtrA = getLoadStorePointerOperand(A);
+  Value *PtrB = getLoadStorePointerOperand(B);
   unsigned ASA = getAddressSpaceOperand(A);
   unsigned ASB = getAddressSpaceOperand(B);
 
diff --git a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index 342a6d0..27f1372 100644
--- a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -532,12 +532,7 @@ private:
 
     Value *getPointerOperand() const {
       if (IsTargetMemInst) return Info.PtrVal;
-      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
-        return LI->getPointerOperand();
-      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
-        return SI->getPointerOperand();
-      }
-      return nullptr;
+      return getLoadStorePointerOperand(Inst);
     }
 
     bool mayReadFromMemory() const {
diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index 05bf8c6..d64bb0b 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -118,8 +118,6 @@ public:
   bool run();
 
 private:
-  Value *getPointerOperand(Value *I) const;
-
   GetElementPtrInst *getSourceGEP(Value *Src) const;
 
   unsigned getPointerAddressSpace(Value *I);
@@ -271,14 +269,6 @@ bool Vectorizer::run() {
   return Changed;
 }
 
-Value *Vectorizer::getPointerOperand(Value *I) const {
-  if (LoadInst *LI = dyn_cast<LoadInst>(I))
-    return LI->getPointerOperand();
-  if (StoreInst *SI = dyn_cast<StoreInst>(I))
-    return SI->getPointerOperand();
-  return nullptr;
-}
-
 unsigned Vectorizer::getPointerAddressSpace(Value *I) {
   if (LoadInst *L = dyn_cast<LoadInst>(I))
     return L->getPointerAddressSpace();
@@ -292,7 +282,7 @@ GetElementPtrInst *Vectorizer::getSourceGEP(Value *Src) const {
   // and without casts.
   // TODO: a stride set by the add instruction below can match the difference
   // in pointee type size here. Currently it will not be vectorized.
-  Value *SrcPtr = getPointerOperand(Src);
+  Value *SrcPtr = getLoadStorePointerOperand(Src);
   Value *SrcBase = SrcPtr->stripPointerCasts();
   if (DL.getTypeStoreSize(SrcPtr->getType()->getPointerElementType()) ==
       DL.getTypeStoreSize(SrcBase->getType()->getPointerElementType()))
@@ -302,8 +292,8 @@ GetElementPtrInst *Vectorizer::getSourceGEP(Value *Src) const {
 
 // FIXME: Merge with llvm::isConsecutiveAccess
 bool Vectorizer::isConsecutiveAccess(Value *A, Value *B) {
-  Value *PtrA = getPointerOperand(A);
-  Value *PtrB = getPointerOperand(B);
+  Value *PtrA = getLoadStorePointerOperand(A);
+  Value *PtrB = getLoadStorePointerOperand(B);
   unsigned ASA = getPointerAddressSpace(A);
   unsigned ASB = getPointerAddressSpace(B);
 
@@ -482,7 +472,7 @@ Vectorizer::getBoundaryInstrs(ArrayRef<Instruction *> Chain) {
 void Vectorizer::eraseInstructions(ArrayRef<Instruction *> Chain) {
   SmallVector<Instruction *, 16> Instrs;
   for (Instruction *I : Chain) {
-    Value *PtrOperand = getPointerOperand(I);
+    Value *PtrOperand = getLoadStorePointerOperand(I);
     assert(PtrOperand && "Instruction must have a pointer operand.");
     Instrs.push_back(I);
     if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(PtrOperand))
@@ -592,10 +582,10 @@ Vectorizer::getVectorizablePrefix(ArrayRef<Instruction *> Chain) {
         dbgs() << "LSV: Found alias:\n"
                   "  Aliasing instruction and pointer:\n"
                << "  " << *MemInstr << '\n'
-               << "  " << *getPointerOperand(MemInstr) << '\n'
+               << "  " << *getLoadStorePointerOperand(MemInstr) << '\n'
               << "  Aliased instruction and pointer:\n"
               << "  " << *ChainInstr << '\n'
-              << "  " << *getPointerOperand(ChainInstr) << '\n';
+              << "  " << *getLoadStorePointerOperand(ChainInstr) << '\n';
       });
       // Save this aliasing memory instruction as a barrier, but allow other
       // instructions that precede the barrier to be vectorized with this one.
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 8673395..0a5938e 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -296,16 +296,6 @@ static Type *ToVectorTy(Type *Scalar, unsigned VF) {
 // in the project. They can be effectively organized in a common Load/Store
 // utilities unit.
 
-/// A helper function that returns the pointer operand of a load or store
-/// instruction.
-static Value *getPointerOperand(Value *I) {
-  if (auto *LI = dyn_cast<LoadInst>(I))
-    return LI->getPointerOperand();
-  if (auto *SI = dyn_cast<StoreInst>(I))
-    return SI->getPointerOperand();
-  return nullptr;
-}
-
 /// A helper function that returns the type of loaded or stored value.
 static Type *getMemInstValueType(Value *I) {
   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
@@ -2860,7 +2850,7 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) {
     return;
 
   const DataLayout &DL = Instr->getModule()->getDataLayout();
-  Value *Ptr = getPointerOperand(Instr);
+  Value *Ptr = getLoadStorePointerOperand(Instr);
 
   // Prepare for the vector type of the interleaved load/store.
   Type *ScalarTy = getMemInstValueType(Instr);
@@ -3002,7 +2992,7 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
 
   Type *ScalarDataTy = getMemInstValueType(Instr);
   Type *DataTy = VectorType::get(ScalarDataTy, VF);
-  Value *Ptr = getPointerOperand(Instr);
+  Value *Ptr = getLoadStorePointerOperand(Instr);
   unsigned Alignment = getMemInstAlignment(Instr);
   // An alignment of 0 means target abi alignment. We need to use the scalar's
   // target abi alignment in such a case.
@@ -4797,7 +4787,7 @@ bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
       continue;
 
     for (Instruction &I : *BB)
-      if (auto *Ptr = getPointerOperand(&I))
+      if (auto *Ptr = getLoadStorePointerOperand(&I))
         SafePointes.insert(Ptr);
   }
 
@@ -5248,7 +5238,7 @@ void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
       if (Ptr == Store->getValueOperand())
        return WideningDecision == CM_Scalarize;
-    assert(Ptr == getPointerOperand(MemAccess) &&
+    assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
           "Ptr is neither a value or pointer operand");
    return WideningDecision != CM_GatherScatter;
  };
@@ -5416,7 +5406,7 @@ bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) {
   case Instruction::Store: {
     if (!Legal->isMaskRequired(I))
       return false;
-    auto *Ptr = getPointerOperand(I);
+    auto *Ptr = getLoadStorePointerOperand(I);
     auto *Ty = getMemInstValueType(I);
     return isa<LoadInst>(I) ?
         !(isLegalMaskedLoad(Ty, Ptr) || isLegalMaskedGather(Ty)) :
@@ -5438,7 +5428,7 @@ bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I,
   StoreInst *SI = dyn_cast<StoreInst>(I);
   assert((LI || SI) && "Invalid memory instruction");
 
-  auto *Ptr = getPointerOperand(I);
+  auto *Ptr = getLoadStorePointerOperand(I);
 
   // In order to be widened, the pointer should be consecutive, first of all.
   if (!Legal->isConsecutivePtr(Ptr))
@@ -5524,7 +5514,7 @@ void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
   for (auto *BB : TheLoop->blocks())
     for (auto &I : *BB) {
       // If there's no pointer operand, there's nothing to do.
-      auto *Ptr = dyn_cast_or_null<Instruction>(getPointerOperand(&I));
+      auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
       if (!Ptr)
         continue;
 
@@ -5532,7 +5522,7 @@ void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
       // pointer operand.
       auto UsersAreMemAccesses =
          llvm::all_of(Ptr->users(), [&](User *U) -> bool {
-            return getPointerOperand(U) == Ptr;
+            return getLoadStorePointerOperand(U) == Ptr;
          });
 
      // Ensure the memory instruction will not be scalarized or used by
@@ -5572,7 +5562,8 @@ void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
        if (llvm::all_of(OI->users(), [&](User *U) -> bool {
              auto *J = cast<Instruction>(U);
              return !TheLoop->contains(J) || Worklist.count(J) ||
-                    (OI == getPointerOperand(J) && isUniformDecision(J, VF));
+                    (OI == getLoadStorePointerOperand(J) &&
+                     isUniformDecision(J, VF));
            })) {
          Worklist.insert(OI);
          DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
@@ -5583,7 +5574,7 @@ void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
   // Returns true if Ptr is the pointer operand of a memory access instruction
   // I, and I is known to not require scalarization.
   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
-    return getPointerOperand(I) == Ptr && isUniformDecision(I, VF);
+    return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
   };
 
   // For an instruction to be added into Worklist above, all its users inside
@@ -5744,7 +5735,7 @@ void InterleavedAccessInfo::collectConstStrideAccesses(
       if (!LI && !SI)
         continue;
 
-      Value *Ptr = getPointerOperand(&I);
+      Value *Ptr = getLoadStorePointerOperand(&I);
       // We don't check wrapping here because we don't know yet if Ptr will be
      // part of a full group or a group with gaps. Checking wrapping for all
      // pointers (even those that end up in groups with no gaps) will be overly
@@ -5994,7 +5985,7 @@ void InterleavedAccessInfo::analyzeInterleaving(
     // So we check only group member 0 (which is always guaranteed to exist),
     // and group member Factor - 1; If the latter doesn't exist we rely on
     // peeling (if it is a non-reveresed accsess -- see Case 3).
-    Value *FirstMemberPtr = getPointerOperand(Group->getMember(0));
+    Value *FirstMemberPtr = getLoadStorePointerOperand(Group->getMember(0));
     if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
                       /*ShouldCheckWrap=*/true)) {
       DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
@@ -6004,7 +5995,7 @@ void InterleavedAccessInfo::analyzeInterleaving(
     }
     Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
     if (LastMember) {
-      Value *LastMemberPtr = getPointerOperand(LastMember);
+      Value *LastMemberPtr = getLoadStorePointerOperand(LastMember);
       if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
                         /*ShouldCheckWrap=*/true)) {
         DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
@@ -6824,7 +6815,7 @@ unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
   unsigned Alignment = getMemInstAlignment(I);
   unsigned AS = getMemInstAddressSpace(I);
 
-  Value *Ptr = getPointerOperand(I);
+  Value *Ptr = getLoadStorePointerOperand(I);
   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
 
   // Figure out whether the access is strided and get the stride value
@@ -6862,7 +6853,7 @@ unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
   Type *ValTy = getMemInstValueType(I);
   Type *VectorTy = ToVectorTy(ValTy, VF);
   unsigned Alignment = getMemInstAlignment(I);
-  Value *Ptr = getPointerOperand(I);
+  Value *Ptr = getLoadStorePointerOperand(I);
   unsigned AS = getMemInstAddressSpace(I);
   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
 
@@ -6898,7 +6889,7 @@ unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
   Type *ValTy = getMemInstValueType(I);
   Type *VectorTy = ToVectorTy(ValTy, VF);
   unsigned Alignment = getMemInstAlignment(I);
-  Value *Ptr = getPointerOperand(I);
+  Value *Ptr = getLoadStorePointerOperand(I);
 
   return TTI.getAddressComputationCost(VectorTy) +
          TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
@@ -6982,7 +6973,7 @@ void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) {
   for (BasicBlock *BB : TheLoop->blocks()) {
     // For each instruction in the old loop.
     for (Instruction &I : *BB) {
-      Value *Ptr = getPointerOperand(&I);
+      Value *Ptr = getLoadStorePointerOperand(&I);
       if (!Ptr)
         continue;
 
@@ -6998,7 +6989,8 @@ void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) {
       // We assume that widening is the best solution when possible.
      if (memoryInstructionCanBeWidened(&I, VF)) {
        unsigned Cost = getConsecutiveMemOpCost(&I, VF);
-        int ConsecutiveStride = Legal->isConsecutivePtr(getPointerOperand(&I));
+        int ConsecutiveStride =
+            Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
        assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
               "Expected consecutive stride.");
        InstWidening Decision =
@@ -7068,7 +7060,7 @@ void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) {
     for (BasicBlock *BB : TheLoop->blocks())
       for (Instruction &I : *BB) {
         Instruction *PtrDef =
-            dyn_cast_or_null<Instruction>(getPointerOperand(&I));
+            dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
         if (PtrDef && TheLoop->contains(PtrDef) &&
             getWideningDecision(&I, VF) != CM_GatherScatter)
           AddrDefs.insert(PtrDef);
@@ -7382,7 +7374,7 @@ Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) {
 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
   // Check if the pointer operand of a load or store instruction is
   // consecutive.
-  if (auto *Ptr = getPointerOperand(Inst))
+  if (auto *Ptr = getLoadStorePointerOperand(Inst))
     return Legal->isConsecutivePtr(Ptr);
   return false;
 }
-- 
2.7.4
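
A minimal usage sketch of the consolidated helpers follows. Only getLoadStorePointerOperand() and getPointerOperand() come from the patch above; the collectLoadStorePointers() wrapper, its signature, and the chosen headers are illustrative assumptions, not part of the change.

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Illustrative helper (not in the patch): collect the pointer operand of every
// load and store in a basic block through the consolidated utility, rather
// than keeping yet another file-local getPointerOperand() copy.
static void collectLoadStorePointers(BasicBlock &BB,
                                     SmallVectorImpl<Value *> &Ptrs) {
  for (Instruction &I : BB)
    // Returns nullptr for anything that is not a load or store; the broader
    // getPointerOperand() would additionally accept a GEP.
    if (Value *Ptr = getLoadStorePointerOperand(&I))
      Ptrs.push_back(Ptr);
}

Callers that also want to treat GEPs uniformly can use getPointerOperand() instead, which mirrors what the removed Delinearization.cpp helper handled before this change.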