#ifndef LLVM_ANALYSIS_ALIASANALYSIS_H
#define LLVM_ANALYSIS_ALIASANALYSIS_H
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
return ModRefInfo(FMRB & static_cast<int>(ModRefInfo::ModRef));
}
+/// This class stores info we want to provide to or retain within an alias
+/// query. By default, the root query is stateless and starts with a freshly
+/// constructed info object. Specific alias analyses can use this query info to
+/// store per-query state that is important for recursive or nested queries to
+/// avoid recomputing. To enable preserving this state across multiple queries
+/// where safe (due to the IR not changing), use a `BatchAAResults` wrapper.
+/// The information stored in an `AAQueryInfo` is currently limited to the
+/// caches used by BasicAA, but can further be extended to fit other AA needs.
+class AAQueryInfo {
+public:
+ using LocPair = std::pair<MemoryLocation, MemoryLocation>;
+ using AliasCacheT = SmallDenseMap<LocPair, AliasResult, 8>;
+ AliasCacheT AliasCache;
+
+ using IsCapturedCacheT = SmallDenseMap<const Value *, bool, 8>;
+ IsCapturedCacheT IsCapturedCache;
+
+ AAQueryInfo() : AliasCache(), IsCapturedCache() {}
+};
+
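+// Illustrative sketch of how an alias analysis implementation can use the
+// per-query caches: seed the cache so recursive queries over the same pair
+// terminate, recurse with the same AAQueryInfo, then record the final result.
+// `MyAA` and `recursiveAlias` are hypothetical names used only for this
+// example:
+//
+//   AliasResult MyAA::alias(const MemoryLocation &LocA,
+//                           const MemoryLocation &LocB, AAQueryInfo &AAQI) {
+//     AAQueryInfo::LocPair Key(LocA, LocB);
+//     auto Pair = AAQI.AliasCache.try_emplace(Key, MayAlias);
+//     if (!Pair.second)
+//       return Pair.first->second;       // Hit: reuse this query's result.
+//     AliasResult R = recursiveAlias(LocA, LocB, AAQI); // Thread AAQI along.
+//     AAQI.AliasCache[Key] = R;          // Re-lookup; recursion may rehash.
+//     return R;
+//   }
+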
+class BatchAAResults;
+
class AAResults {
public:
// Make these results default constructable and movable. We have to spell
/// helpers above.
ModRefInfo getModRefInfo(const Instruction *I,
const Optional<MemoryLocation> &OptLoc) {
- if (OptLoc == None) {
- if (const auto *Call = dyn_cast<CallBase>(I)) {
- return createModRefInfo(getModRefBehavior(Call));
- }
- }
-
- const MemoryLocation &Loc = OptLoc.getValueOr(MemoryLocation());
-
- switch (I->getOpcode()) {
- case Instruction::VAArg: return getModRefInfo((const VAArgInst*)I, Loc);
- case Instruction::Load: return getModRefInfo((const LoadInst*)I, Loc);
- case Instruction::Store: return getModRefInfo((const StoreInst*)I, Loc);
- case Instruction::Fence: return getModRefInfo((const FenceInst*)I, Loc);
- case Instruction::AtomicCmpXchg:
- return getModRefInfo((const AtomicCmpXchgInst*)I, Loc);
- case Instruction::AtomicRMW:
- return getModRefInfo((const AtomicRMWInst*)I, Loc);
- case Instruction::Call: return getModRefInfo((const CallInst*)I, Loc);
- case Instruction::Invoke: return getModRefInfo((const InvokeInst*)I,Loc);
- case Instruction::CatchPad:
- return getModRefInfo((const CatchPadInst *)I, Loc);
- case Instruction::CatchRet:
- return getModRefInfo((const CatchReturnInst *)I, Loc);
- default:
- return ModRefInfo::NoModRef;
- }
+ AAQueryInfo AAQIP;
+ return getModRefInfo(I, OptLoc, AAQIP);
}
/// A convenience wrapper for constructing the memory location.
}
private:
+ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
+ AAQueryInfo &AAQI);
+ bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI,
+ bool OrLocal = false);
+ ModRefInfo getModRefInfo(Instruction *I, const CallBase *Call2,
+ AAQueryInfo &AAQIP);
+ ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
+ AAQueryInfo &AAQI);
+ ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
+ AAQueryInfo &AAQI);
+ ModRefInfo getModRefInfo(const VAArgInst *V, const MemoryLocation &Loc,
+ AAQueryInfo &AAQI);
+ ModRefInfo getModRefInfo(const LoadInst *L, const MemoryLocation &Loc,
+ AAQueryInfo &AAQI);
+ ModRefInfo getModRefInfo(const StoreInst *S, const MemoryLocation &Loc,
+ AAQueryInfo &AAQI);
+ ModRefInfo getModRefInfo(const FenceInst *S, const MemoryLocation &Loc,
+ AAQueryInfo &AAQI);
+ ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX,
+ const MemoryLocation &Loc, AAQueryInfo &AAQI);
+ ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const MemoryLocation &Loc,
+ AAQueryInfo &AAQI);
+ ModRefInfo getModRefInfo(const CatchPadInst *I, const MemoryLocation &Loc,
+ AAQueryInfo &AAQI);
+ ModRefInfo getModRefInfo(const CatchReturnInst *I, const MemoryLocation &Loc,
+ AAQueryInfo &AAQI);
+ ModRefInfo getModRefInfo(const Instruction *I,
+ const Optional<MemoryLocation> &OptLoc,
+ AAQueryInfo &AAQIP) {
+ if (OptLoc == None) {
+ if (const auto *Call = dyn_cast<CallBase>(I)) {
+ return createModRefInfo(getModRefBehavior(Call));
+ }
+ }
+
+ const MemoryLocation &Loc = OptLoc.getValueOr(MemoryLocation());
+
+ switch (I->getOpcode()) {
+ case Instruction::VAArg:
+ return getModRefInfo((const VAArgInst *)I, Loc, AAQIP);
+ case Instruction::Load:
+ return getModRefInfo((const LoadInst *)I, Loc, AAQIP);
+ case Instruction::Store:
+ return getModRefInfo((const StoreInst *)I, Loc, AAQIP);
+ case Instruction::Fence:
+ return getModRefInfo((const FenceInst *)I, Loc, AAQIP);
+ case Instruction::AtomicCmpXchg:
+ return getModRefInfo((const AtomicCmpXchgInst *)I, Loc, AAQIP);
+ case Instruction::AtomicRMW:
+ return getModRefInfo((const AtomicRMWInst *)I, Loc, AAQIP);
+ case Instruction::Call:
+ return getModRefInfo((const CallInst *)I, Loc, AAQIP);
+ case Instruction::Invoke:
+ return getModRefInfo((const InvokeInst *)I, Loc, AAQIP);
+ case Instruction::CatchPad:
+ return getModRefInfo((const CatchPadInst *)I, Loc, AAQIP);
+ case Instruction::CatchRet:
+ return getModRefInfo((const CatchReturnInst *)I, Loc, AAQIP);
+ default:
+ return ModRefInfo::NoModRef;
+ }
+ }
+
class Concept;
template <typename T> class Model;
std::vector<std::unique_ptr<Concept>> AAs;
std::vector<AnalysisKey *> AADeps;
+
+ friend class BatchAAResults;
+};
+
+/// This class is a wrapper over an AAResults, and it is intended to be used
+/// only when there are no IR changes in between queries. BatchAAResults
+/// reuses the same `AAQueryInfo` to preserve the state across queries,
+/// essentially making AA work in "batch mode". The internal state cannot be
+/// cleared, so to go out of "batch mode", the user must either use AAResults
+/// or create a new BatchAAResults.
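+///
+/// A minimal usage sketch (illustrative only; `AA`, `P1` and `P2` are
+/// assumed to exist in the caller):
+/// \code
+///   BatchAAResults BatchAA(AA);
+///   AliasResult R1 = BatchAA.alias(MemoryLocation(P1), MemoryLocation(P2));
+///   AliasResult R2 = BatchAA.alias(MemoryLocation(P2), MemoryLocation(P1));
+///   // Both queries share the same AAQueryInfo and thus its caches.
+/// \endcode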
+class BatchAAResults {
+ AAResults &AA;
+ AAQueryInfo AAQI;
+
+public:
+ BatchAAResults(AAResults &AAR) : AA(AAR), AAQI() {}
+ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
+ return AA.alias(LocA, LocB, AAQI);
+ }
+ bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal = false) {
+ return AA.pointsToConstantMemory(Loc, AAQI, OrLocal);
+ }
+ ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc) {
+ return AA.getModRefInfo(Call, Loc, AAQI);
+ }
+ ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2) {
+ return AA.getModRefInfo(Call1, Call2, AAQI);
+ }
+ ModRefInfo getModRefInfo(const Instruction *I,
+ const Optional<MemoryLocation> &OptLoc) {
+ return AA.getModRefInfo(I, OptLoc, AAQI);
+ }
+ ModRefInfo getModRefInfo(Instruction *I, const CallBase *Call2) {
+ return AA.getModRefInfo(I, Call2, AAQI);
+ }
+ ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
+ return AA.getArgModRefInfo(Call, ArgIdx);
+ }
+ FunctionModRefBehavior getModRefBehavior(const CallBase *Call) {
+ return AA.getModRefBehavior(Call);
+ }
};
/// Temporary typedef for legacy code that uses a generic \c AliasAnalysis
/// each other. This is the interface that must be implemented by specific
/// alias analysis implementations.
virtual AliasResult alias(const MemoryLocation &LocA,
- const MemoryLocation &LocB) = 0;
+ const MemoryLocation &LocB, AAQueryInfo &AAQI) = 0;
/// Checks whether the given location points to constant memory, or if
/// \p OrLocal is true whether it points to a local alloca.
virtual bool pointsToConstantMemory(const MemoryLocation &Loc,
- bool OrLocal) = 0;
+ AAQueryInfo &AAQI, bool OrLocal) = 0;
/// @}
//===--------------------------------------------------------------------===//
/// getModRefInfo (for call sites) - Return information about whether
/// a particular call site modifies or reads the specified memory location.
virtual ModRefInfo getModRefInfo(const CallBase *Call,
- const MemoryLocation &Loc) = 0;
+ const MemoryLocation &Loc,
+ AAQueryInfo &AAQI) = 0;
/// Return information about whether two call sites may refer to the same set
/// of memory locations. See the AA documentation for details:
/// http://llvm.org/docs/AliasAnalysis.html#ModRefInfo
- virtual ModRefInfo getModRefInfo(const CallBase *Call1,
- const CallBase *Call2) = 0;
+ virtual ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
+ AAQueryInfo &AAQI) = 0;
/// @}
};
void setAAResults(AAResults *NewAAR) override { Result.setAAResults(NewAAR); }
- AliasResult alias(const MemoryLocation &LocA,
- const MemoryLocation &LocB) override {
- return Result.alias(LocA, LocB);
+ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
+ AAQueryInfo &AAQI) override {
+ return Result.alias(LocA, LocB, AAQI);
}
- bool pointsToConstantMemory(const MemoryLocation &Loc,
+ bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI,
bool OrLocal) override {
- return Result.pointsToConstantMemory(Loc, OrLocal);
+ return Result.pointsToConstantMemory(Loc, AAQI, OrLocal);
}
ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) override {
return Result.getModRefBehavior(F);
}
- ModRefInfo getModRefInfo(const CallBase *Call,
- const MemoryLocation &Loc) override {
- return Result.getModRefInfo(Call, Loc);
+ ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
+ AAQueryInfo &AAQI) override {
+ return Result.getModRefInfo(Call, Loc, AAQI);
}
- ModRefInfo getModRefInfo(const CallBase *Call1,
- const CallBase *Call2) override {
- return Result.getModRefInfo(Call1, Call2);
+ ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
+ AAQueryInfo &AAQI) override {
+ return Result.getModRefInfo(Call1, Call2, AAQI);
}
};
AAResultsProxy(AAResults *AAR, DerivedT &CurrentResult)
: AAR(AAR), CurrentResult(CurrentResult) {}
- AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
- return AAR ? AAR->alias(LocA, LocB) : CurrentResult.alias(LocA, LocB);
+ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
+ AAQueryInfo &AAQI) {
+ return AAR ? AAR->alias(LocA, LocB, AAQI)
+ : CurrentResult.alias(LocA, LocB, AAQI);
}
- bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal) {
- return AAR ? AAR->pointsToConstantMemory(Loc, OrLocal)
- : CurrentResult.pointsToConstantMemory(Loc, OrLocal);
+ bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI,
+ bool OrLocal) {
+ return AAR ? AAR->pointsToConstantMemory(Loc, AAQI, OrLocal)
+ : CurrentResult.pointsToConstantMemory(Loc, AAQI, OrLocal);
}
ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
return AAR ? AAR->getModRefBehavior(F) : CurrentResult.getModRefBehavior(F);
}
- ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc) {
- return AAR ? AAR->getModRefInfo(Call, Loc)
- : CurrentResult.getModRefInfo(Call, Loc);
+ ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
+ AAQueryInfo &AAQI) {
+ return AAR ? AAR->getModRefInfo(Call, Loc, AAQI)
+ : CurrentResult.getModRefInfo(Call, Loc, AAQI);
}
- ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2) {
- return AAR ? AAR->getModRefInfo(Call1, Call2)
- : CurrentResult.getModRefInfo(Call1, Call2);
+ ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
+ AAQueryInfo &AAQI) {
+ return AAR ? AAR->getModRefInfo(Call1, Call2, AAQI)
+ : CurrentResult.getModRefInfo(Call1, Call2, AAQI);
}
};
AAResultsProxy getBestAAResults() { return AAResultsProxy(AAR, derived()); }
public:
- AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
+ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
+ AAQueryInfo &AAQI) {
return MayAlias;
}
- bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal) {
+ bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI,
+ bool OrLocal) {
return false;
}
return FMRB_UnknownModRefBehavior;
}
- ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc) {
+ ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
+ AAQueryInfo &AAQI) {
return ModRefInfo::ModRef;
}
- ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2) {
+ ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
+ AAQueryInfo &AAQI) {
return ModRefInfo::ModRef;
}
};
bool invalidate(Function &Fn, const PreservedAnalyses &PA,
FunctionAnalysisManager::Invalidator &Inv);
- AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
+ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
+ AAQueryInfo &AAQI);
- ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
+ ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
+ AAQueryInfo &AAQI);
- ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2);
+ ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
+ AAQueryInfo &AAQI);
  /// Chases pointers until we find a constant global or determine that we
  /// cannot.
- bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal);
+ bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI,
+ bool OrLocal);
/// Get the location associated with a pointer argument of a callsite.
ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx);
SmallVector<VariableGEPIndex, 4> VarIndices;
};
- /// Track alias queries to guard against recursion.
- using LocPair = std::pair<MemoryLocation, MemoryLocation>;
- using AliasCacheTy = SmallDenseMap<LocPair, AliasResult, 8>;
- AliasCacheTy AliasCache;
- using IsCapturedCacheTy = SmallDenseMap<const Value *, bool, 8>;
- IsCapturedCacheTy IsCapturedCache;
-
/// Tracks phi nodes we have visited.
///
/// When interpret "Value" pointer equality as value equality we need to make
AliasResult aliasGEP(const GEPOperator *V1, LocationSize V1Size,
const AAMDNodes &V1AAInfo, const Value *V2,
LocationSize V2Size, const AAMDNodes &V2AAInfo,
- const Value *UnderlyingV1, const Value *UnderlyingV2);
+ const Value *UnderlyingV1, const Value *UnderlyingV2,
+ AAQueryInfo &AAQI);
AliasResult aliasPHI(const PHINode *PN, LocationSize PNSize,
const AAMDNodes &PNAAInfo, const Value *V2,
LocationSize V2Size, const AAMDNodes &V2AAInfo,
- const Value *UnderV2);
+ const Value *UnderV2, AAQueryInfo &AAQI);
AliasResult aliasSelect(const SelectInst *SI, LocationSize SISize,
const AAMDNodes &SIAAInfo, const Value *V2,
LocationSize V2Size, const AAMDNodes &V2AAInfo,
- const Value *UnderV2);
+ const Value *UnderV2, AAQueryInfo &AAQI);
AliasResult aliasCheck(const Value *V1, LocationSize V1Size,
AAMDNodes V1AATag, const Value *V2,
LocationSize V2Size, AAMDNodes V2AATag,
- const Value *O1 = nullptr, const Value *O2 = nullptr);
+ AAQueryInfo &AAQI, const Value *O1 = nullptr,
+ const Value *O2 = nullptr);
};
/// Analysis pass providing a never-invalidated alias analysis result.
const cflaa::AliasSummary *getAliasSummary(const Function &);
AliasResult query(const MemoryLocation &, const MemoryLocation &);
- AliasResult alias(const MemoryLocation &, const MemoryLocation &);
+ AliasResult alias(const MemoryLocation &, const MemoryLocation &,
+ AAQueryInfo &);
private:
/// Ensures that the given function is available in the cache.
AliasResult query(const MemoryLocation &LocA, const MemoryLocation &LocB);
- AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
+ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
+ AAQueryInfo &AAQI) {
if (LocA.Ptr == LocB.Ptr)
return MustAlias;
// ConstantExpr, but every query needs to have at least one Value tied to a
// Function, and neither GlobalValues nor ConstantExprs are.
if (isa<Constant>(LocA.Ptr) && isa<Constant>(LocB.Ptr))
- return AAResultBase::alias(LocA, LocB);
+ return AAResultBase::alias(LocA, LocB, AAQI);
AliasResult QueryResult = query(LocA, LocB);
if (QueryResult == MayAlias)
- return AAResultBase::alias(LocA, LocB);
+ return AAResultBase::alias(LocA, LocB, AAQI);
return QueryResult;
}
//------------------------------------------------
// Implement the AliasAnalysis API
//
- AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
+ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
+ AAQueryInfo &AAQI);
using AAResultBase::getModRefInfo;
- ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
+ ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
+ AAQueryInfo &AAQI);
/// getModRefBehavior - Return the behavior of the specified function if
/// called from the specified call site. The call site may be null in which
bool isNonEscapingGlobalNoAlias(const GlobalValue *GV, const Value *V);
ModRefInfo getModRefInfoForArgument(const CallBase *Call,
- const GlobalValue *GV);
+ const GlobalValue *GV, AAQueryInfo &AAQI);
};
/// Analysis pass providing a never-invalidated alias analysis result.
const MemoryUseOrDef *Template = nullptr);
private:
- class ClobberWalkerBase;
- class CachingWalker;
- class SkipSelfWalker;
+ template <class AliasAnalysisType> class ClobberWalkerBase;
+ template <class AliasAnalysisType> class CachingWalker;
+ template <class AliasAnalysisType> class SkipSelfWalker;
class OptimizeUses;
- CachingWalker *getWalkerImpl();
- void buildMemorySSA();
+ CachingWalker<AliasAnalysis> *getWalkerImpl();
+ void buildMemorySSA(BatchAAResults &BAA);
void optimizeUses();
void prepareForMoveTo(MemoryAccess *, BasicBlock *);
void markUnreachableAsLiveOnEntry(BasicBlock *BB);
bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const;
MemoryPhi *createMemoryPhi(BasicBlock *BB);
- MemoryUseOrDef *createNewAccess(Instruction *,
+ template <typename AliasAnalysisType>
+ MemoryUseOrDef *createNewAccess(Instruction *, AliasAnalysisType *,
const MemoryUseOrDef *Template = nullptr);
MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &);
mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;
// Memory SSA building info
- std::unique_ptr<ClobberWalkerBase> WalkerBase;
- std::unique_ptr<CachingWalker> Walker;
- std::unique_ptr<SkipSelfWalker> SkipWalker;
+ std::unique_ptr<ClobberWalkerBase<AliasAnalysis>> WalkerBase;
+ std::unique_ptr<CachingWalker<AliasAnalysis>> Walker;
+ std::unique_ptr<SkipSelfWalker<AliasAnalysis>> SkipWalker;
unsigned NextID;
};
return false;
}
- AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
- bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal);
+ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
+ AAQueryInfo &AAQI);
+ bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI,
+ bool OrLocal);
using AAResultBase::getModRefBehavior;
FunctionModRefBehavior getModRefBehavior(const Function *F);
using AAResultBase::getModRefInfo;
- ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
+ ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
+ AAQueryInfo &AAQI);
};
/// Analysis pass providing a never-invalidated alias analysis result.
explicit SCEVAAResult(ScalarEvolution &SE) : AAResultBase(), SE(SE) {}
SCEVAAResult(SCEVAAResult &&Arg) : AAResultBase(std::move(Arg)), SE(Arg.SE) {}
- AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
+ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
+ AAQueryInfo &AAQI);
private:
Value *GetBaseValue(const SCEV *S);
return false;
}
- AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
- ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
- ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2);
+ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
+ AAQueryInfo &AAQI);
+ ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
+ AAQueryInfo &AAQI);
+ ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
+ AAQueryInfo &AAQI);
private:
bool mayAliasInScopes(const MDNode *Scopes, const MDNode *NoAlias) const;
return false;
}
- AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
- bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal);
+ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
+ AAQueryInfo &AAQI);
+ bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI,
+ bool OrLocal);
FunctionModRefBehavior getModRefBehavior(const CallBase *Call);
FunctionModRefBehavior getModRefBehavior(const Function *F);
- ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
- ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2);
+ ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
+ AAQueryInfo &AAQI);
+ ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
+ AAQueryInfo &AAQI);
private:
bool Aliases(const MDNode *A, const MDNode *B) const;
AliasResult AAResults::alias(const MemoryLocation &LocA,
const MemoryLocation &LocB) {
+ AAQueryInfo AAQIP;
+ return alias(LocA, LocB, AAQIP);
+}
+
+AliasResult AAResults::alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB, AAQueryInfo &AAQI) {
for (const auto &AA : AAs) {
- auto Result = AA->alias(LocA, LocB);
+ auto Result = AA->alias(LocA, LocB, AAQI);
if (Result != MayAlias)
return Result;
}
bool AAResults::pointsToConstantMemory(const MemoryLocation &Loc,
bool OrLocal) {
+ AAQueryInfo AAQIP;
+ return pointsToConstantMemory(Loc, AAQIP, OrLocal);
+}
+
+bool AAResults::pointsToConstantMemory(const MemoryLocation &Loc,
+ AAQueryInfo &AAQI, bool OrLocal) {
for (const auto &AA : AAs)
- if (AA->pointsToConstantMemory(Loc, OrLocal))
+ if (AA->pointsToConstantMemory(Loc, AAQI, OrLocal))
return true;
return false;
}
ModRefInfo AAResults::getModRefInfo(Instruction *I, const CallBase *Call2) {
+ AAQueryInfo AAQIP;
+ return getModRefInfo(I, Call2, AAQIP);
+}
+
+ModRefInfo AAResults::getModRefInfo(Instruction *I, const CallBase *Call2,
+ AAQueryInfo &AAQI) {
// We may have two calls.
if (const auto *Call1 = dyn_cast<CallBase>(I)) {
// Check if the two calls modify the same memory.
- return getModRefInfo(Call1, Call2);
+ return getModRefInfo(Call1, Call2, AAQI);
} else if (I->isFenceLike()) {
// If this is a fence, just return ModRef.
return ModRefInfo::ModRef;
// is that if the call references what this instruction
// defines, it must be clobbered by this location.
const MemoryLocation DefLoc = MemoryLocation::get(I);
- ModRefInfo MR = getModRefInfo(Call2, DefLoc);
+ ModRefInfo MR = getModRefInfo(Call2, DefLoc, AAQI);
if (isModOrRefSet(MR))
return setModAndRef(MR);
}
ModRefInfo AAResults::getModRefInfo(const CallBase *Call,
const MemoryLocation &Loc) {
+ AAQueryInfo AAQIP;
+ return getModRefInfo(Call, Loc, AAQIP);
+}
+
+ModRefInfo AAResults::getModRefInfo(const CallBase *Call,
+ const MemoryLocation &Loc,
+ AAQueryInfo &AAQI) {
ModRefInfo Result = ModRefInfo::ModRef;
for (const auto &AA : AAs) {
- Result = intersectModRef(Result, AA->getModRefInfo(Call, Loc));
+ Result = intersectModRef(Result, AA->getModRefInfo(Call, Loc, AAQI));
// Early-exit the moment we reach the bottom of the lattice.
if (isNoModRef(Result))
ModRefInfo AAResults::getModRefInfo(const CallBase *Call1,
const CallBase *Call2) {
+ AAQueryInfo AAQIP;
+ return getModRefInfo(Call1, Call2, AAQIP);
+}
+
+ModRefInfo AAResults::getModRefInfo(const CallBase *Call1,
+ const CallBase *Call2, AAQueryInfo &AAQI) {
ModRefInfo Result = ModRefInfo::ModRef;
for (const auto &AA : AAs) {
- Result = intersectModRef(Result, AA->getModRefInfo(Call1, Call2));
+ Result = intersectModRef(Result, AA->getModRefInfo(Call1, Call2, AAQI));
// Early-exit the moment we reach the bottom of the lattice.
if (isNoModRef(Result))
ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
const MemoryLocation &Loc) {
+ AAQueryInfo AAQIP;
+ return getModRefInfo(L, Loc, AAQIP);
+}
+ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
+ const MemoryLocation &Loc,
+ AAQueryInfo &AAQI) {
// Be conservative in the face of atomic.
if (isStrongerThan(L->getOrdering(), AtomicOrdering::Unordered))
return ModRefInfo::ModRef;
// If the load address doesn't alias the given address, it doesn't read
// or write the specified memory.
if (Loc.Ptr) {
- AliasResult AR = alias(MemoryLocation::get(L), Loc);
+ AliasResult AR = alias(MemoryLocation::get(L), Loc, AAQI);
if (AR == NoAlias)
return ModRefInfo::NoModRef;
if (AR == MustAlias)
ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
const MemoryLocation &Loc) {
+ AAQueryInfo AAQIP;
+ return getModRefInfo(S, Loc, AAQIP);
+}
+ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
+ const MemoryLocation &Loc,
+ AAQueryInfo &AAQI) {
// Be conservative in the face of atomic.
if (isStrongerThan(S->getOrdering(), AtomicOrdering::Unordered))
return ModRefInfo::ModRef;
if (Loc.Ptr) {
- AliasResult AR = alias(MemoryLocation::get(S), Loc);
+ AliasResult AR = alias(MemoryLocation::get(S), Loc, AAQI);
// If the store address cannot alias the pointer in question, then the
// specified memory cannot be modified by the store.
if (AR == NoAlias)
// If the pointer is a pointer to constant memory, then it could not have
// been modified by this store.
- if (pointsToConstantMemory(Loc))
+ if (pointsToConstantMemory(Loc, AAQI))
return ModRefInfo::NoModRef;
// If the store address aliases the pointer as must alias, set Must.
}
ModRefInfo AAResults::getModRefInfo(const FenceInst *S, const MemoryLocation &Loc) {
+ AAQueryInfo AAQIP;
+ return getModRefInfo(S, Loc, AAQIP);
+}
+
+ModRefInfo AAResults::getModRefInfo(const FenceInst *S,
+ const MemoryLocation &Loc,
+ AAQueryInfo &AAQI) {
// If we know that the location is a constant memory location, the fence
// cannot modify this location.
- if (Loc.Ptr && pointsToConstantMemory(Loc))
+ if (Loc.Ptr && pointsToConstantMemory(Loc, AAQI))
return ModRefInfo::Ref;
return ModRefInfo::ModRef;
}
ModRefInfo AAResults::getModRefInfo(const VAArgInst *V,
const MemoryLocation &Loc) {
+ AAQueryInfo AAQIP;
+ return getModRefInfo(V, Loc, AAQIP);
+}
+
+ModRefInfo AAResults::getModRefInfo(const VAArgInst *V,
+ const MemoryLocation &Loc,
+ AAQueryInfo &AAQI) {
if (Loc.Ptr) {
- AliasResult AR = alias(MemoryLocation::get(V), Loc);
+ AliasResult AR = alias(MemoryLocation::get(V), Loc, AAQI);
// If the va_arg address cannot alias the pointer in question, then the
// specified memory cannot be accessed by the va_arg.
if (AR == NoAlias)
// If the pointer is a pointer to constant memory, then it could not have
// been modified by this va_arg.
- if (pointsToConstantMemory(Loc))
+ if (pointsToConstantMemory(Loc, AAQI))
return ModRefInfo::NoModRef;
// If the va_arg aliases the pointer as must alias, set Must.
ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad,
const MemoryLocation &Loc) {
+ AAQueryInfo AAQIP;
+ return getModRefInfo(CatchPad, Loc, AAQIP);
+}
+
+ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad,
+ const MemoryLocation &Loc,
+ AAQueryInfo &AAQI) {
if (Loc.Ptr) {
// If the pointer is a pointer to constant memory,
// then it could not have been modified by this catchpad.
- if (pointsToConstantMemory(Loc))
+ if (pointsToConstantMemory(Loc, AAQI))
return ModRefInfo::NoModRef;
}
ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
const MemoryLocation &Loc) {
+ AAQueryInfo AAQIP;
+ return getModRefInfo(CatchRet, Loc, AAQIP);
+}
+
+ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
+ const MemoryLocation &Loc,
+ AAQueryInfo &AAQI) {
if (Loc.Ptr) {
// If the pointer is a pointer to constant memory,
    // then it could not have been modified by this catchret.
- if (pointsToConstantMemory(Loc))
+ if (pointsToConstantMemory(Loc, AAQI))
return ModRefInfo::NoModRef;
}
ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
const MemoryLocation &Loc) {
+ AAQueryInfo AAQIP;
+ return getModRefInfo(CX, Loc, AAQIP);
+}
+
+ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
+ const MemoryLocation &Loc,
+ AAQueryInfo &AAQI) {
// Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
if (isStrongerThanMonotonic(CX->getSuccessOrdering()))
return ModRefInfo::ModRef;
if (Loc.Ptr) {
- AliasResult AR = alias(MemoryLocation::get(CX), Loc);
+ AliasResult AR = alias(MemoryLocation::get(CX), Loc, AAQI);
// If the cmpxchg address does not alias the location, it does not access
// it.
if (AR == NoAlias)
ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
const MemoryLocation &Loc) {
+ AAQueryInfo AAQIP;
+ return getModRefInfo(RMW, Loc, AAQIP);
+}
+
+ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
+ const MemoryLocation &Loc,
+ AAQueryInfo &AAQI) {
// Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
if (isStrongerThanMonotonic(RMW->getOrdering()))
return ModRefInfo::ModRef;
if (Loc.Ptr) {
- AliasResult AR = alias(MemoryLocation::get(RMW), Loc);
+ AliasResult AR = alias(MemoryLocation::get(RMW), Loc, AAQI);
// If the atomicrmw address does not alias the location, it does not access
// it.
if (AR == NoAlias)
/// the function, with global constants being considered local to all
/// functions.
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
- bool OrLocal) {
+ AAQueryInfo &AAQI, bool OrLocal) {
assert(Visited.empty() && "Visited must be cleared after use!");
unsigned MaxLookup = 8;
const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
if (!Visited.insert(V).second) {
Visited.clear();
- return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
+ return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
}
// An alloca instruction defines local memory.
// others. GV may even be a declaration, not a definition.
if (!GV->isConstant()) {
Visited.clear();
- return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
+ return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
}
continue;
}
// Don't bother inspecting phi nodes with many operands.
if (PN->getNumIncomingValues() > MaxLookup) {
Visited.clear();
- return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
+ return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
}
for (Value *IncValue : PN->incoming_values())
Worklist.push_back(IncValue);
// Otherwise be conservative.
Visited.clear();
- return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
+ return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
} while (!Worklist.empty() && --MaxLookup);
Visited.clear();
#endif
AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
- const MemoryLocation &LocB) {
+ const MemoryLocation &LocB,
+ AAQueryInfo &AAQI) {
assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
"BasicAliasAnalysis doesn't support interprocedural queries.");
// If we have a directly cached entry for these locations, we have recursed
// through this once, so just return the cached results. Notably, when this
// happens, we don't clear the cache.
- auto CacheIt = AliasCache.find(LocPair(LocA, LocB));
- if (CacheIt != AliasCache.end())
+ auto CacheIt = AAQI.AliasCache.find(AAQueryInfo::LocPair(LocA, LocB));
+ if (CacheIt != AAQI.AliasCache.end())
+ return CacheIt->second;
+
+ CacheIt = AAQI.AliasCache.find(AAQueryInfo::LocPair(LocB, LocA));
+ if (CacheIt != AAQI.AliasCache.end())
return CacheIt->second;
AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
- LocB.Size, LocB.AATags);
- // AliasCache rarely has more than 1 or 2 elements, always use
- // shrink_and_clear so it quickly returns to the inline capacity of the
- // SmallDenseMap if it ever grows larger.
- // FIXME: This should really be shrink_to_inline_capacity_and_clear().
- AliasCache.shrink_and_clear();
- IsCapturedCache.shrink_and_clear();
+ LocB.Size, LocB.AATags, AAQI);
+
VisitedPhiBBs.clear();
return Alias;
}
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
- const MemoryLocation &Loc) {
+ const MemoryLocation &Loc,
+ AAQueryInfo &AAQI) {
assert(notDifferentParent(Call, Loc.Ptr) &&
"AliasAnalysis query involving multiple functions!");
// then the call can not mod/ref the pointer unless the call takes the pointer
// as an argument, and itself doesn't capture it.
if (!isa<Constant>(Object) && Call != Object &&
- isNonEscapingLocalObject(Object)) {
+ isNonEscapingLocalObject(Object, &AAQI.IsCapturedCache)) {
// Optimistically assume that call doesn't touch Object and check this
// assumption in the following loop.
// If this is a no-capture pointer argument, see if we can tell that it
// is impossible to alias the pointer we're checking.
- AliasResult AR =
- getBestAAResults().alias(MemoryLocation(*CI), MemoryLocation(Object));
+ AliasResult AR = getBestAAResults().alias(MemoryLocation(*CI),
+ MemoryLocation(Object), AAQI);
if (AR != MustAlias)
IsMustAlias = false;
// Operand doesn't alias 'Object', continue looking for other aliases
if (isMallocOrCallocLikeFn(Call, &TLI)) {
// Be conservative if the accessed pointer may alias the allocation -
// fallback to the generic handling below.
- if (getBestAAResults().alias(MemoryLocation(Call), Loc) == NoAlias)
+ if (getBestAAResults().alias(MemoryLocation(Call), Loc, AAQI) == NoAlias)
return ModRefInfo::NoModRef;
}
AliasResult SrcAA, DestAA;
if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
- Loc)) == MustAlias)
+ Loc, AAQI)) == MustAlias)
// Loc is exactly the memcpy source thus disjoint from memcpy dest.
return ModRefInfo::Ref;
if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst),
- Loc)) == MustAlias)
+ Loc, AAQI)) == MustAlias)
// The converse case.
return ModRefInfo::Mod;
return ModRefInfo::Ref;
  // The AAResultBase base class has some smarts, let's use them.
- return AAResultBase::getModRefInfo(Call, Loc);
+ return AAResultBase::getModRefInfo(Call, Loc, AAQI);
}
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
- const CallBase *Call2) {
+ const CallBase *Call2,
+ AAQueryInfo &AAQI) {
// While the assume intrinsic is marked as arbitrarily writing so that
// proper control dependencies will be maintained, it never aliases any
// particular memory location.
: ModRefInfo::NoModRef;
  // The AAResultBase base class has some smarts, let's use them.
- return AAResultBase::getModRefInfo(Call1, Call2);
+ return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
}
/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
/// V2.
-AliasResult
-BasicAAResult::aliasGEP(const GEPOperator *GEP1, LocationSize V1Size,
- const AAMDNodes &V1AAInfo, const Value *V2,
- LocationSize V2Size, const AAMDNodes &V2AAInfo,
- const Value *UnderlyingV1, const Value *UnderlyingV2) {
+AliasResult BasicAAResult::aliasGEP(
+ const GEPOperator *GEP1, LocationSize V1Size, const AAMDNodes &V1AAInfo,
+ const Value *V2, LocationSize V2Size, const AAMDNodes &V2AAInfo,
+ const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
DecomposedGEP DecompGEP1, DecompGEP2;
unsigned MaxPointerSize = getMaxPointerSize(DL);
DecompGEP1.StructOffset = DecompGEP1.OtherOffset = APInt(MaxPointerSize, 0);
// Do the base pointers alias?
AliasResult BaseAlias =
aliasCheck(UnderlyingV1, LocationSize::unknown(), AAMDNodes(),
- UnderlyingV2, LocationSize::unknown(), AAMDNodes());
+ UnderlyingV2, LocationSize::unknown(), AAMDNodes(), AAQI);
// Check for geps of non-aliasing underlying pointers where the offsets are
// identical.
if ((BaseAlias == MayAlias) && V1Size == V2Size) {
// Do the base pointers alias assuming type and size.
- AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size, V1AAInfo,
- UnderlyingV2, V2Size, V2AAInfo);
+ AliasResult PreciseBaseAlias = aliasCheck(
+ UnderlyingV1, V1Size, V1AAInfo, UnderlyingV2, V2Size, V2AAInfo, AAQI);
if (PreciseBaseAlias == NoAlias) {
// See if the computed offset from the common pointer tells us about the
// relation of the resulting pointer.
if (V1Size == LocationSize::unknown() && V2Size == LocationSize::unknown())
return MayAlias;
- AliasResult R =
- aliasCheck(UnderlyingV1, LocationSize::unknown(), AAMDNodes(), V2,
- LocationSize::unknown(), V2AAInfo, nullptr, UnderlyingV2);
+ AliasResult R = aliasCheck(UnderlyingV1, LocationSize::unknown(),
+ AAMDNodes(), V2, LocationSize::unknown(),
+ V2AAInfo, AAQI, nullptr, UnderlyingV2);
if (R != MustAlias) {
// If V2 may alias GEP base pointer, conservatively returns MayAlias.
// If V2 is known not to alias GEP base pointer, then the two values
/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
-AliasResult BasicAAResult::aliasSelect(const SelectInst *SI,
- LocationSize SISize,
- const AAMDNodes &SIAAInfo,
- const Value *V2, LocationSize V2Size,
- const AAMDNodes &V2AAInfo,
- const Value *UnderV2) {
+AliasResult
+BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
+ const AAMDNodes &SIAAInfo, const Value *V2,
+ LocationSize V2Size, const AAMDNodes &V2AAInfo,
+ const Value *UnderV2, AAQueryInfo &AAQI) {
// If the values are Selects with the same condition, we can do a more precise
// check: just check for aliases between the values on corresponding arms.
if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
if (SI->getCondition() == SI2->getCondition()) {
- AliasResult Alias = aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
- SI2->getTrueValue(), V2Size, V2AAInfo);
+ AliasResult Alias =
+ aliasCheck(SI->getTrueValue(), SISize, SIAAInfo, SI2->getTrueValue(),
+ V2Size, V2AAInfo, AAQI);
if (Alias == MayAlias)
return MayAlias;
AliasResult ThisAlias =
aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
- SI2->getFalseValue(), V2Size, V2AAInfo);
+ SI2->getFalseValue(), V2Size, V2AAInfo, AAQI);
return MergeAliasResults(ThisAlias, Alias);
}
// If both arms of the Select node NoAlias or MustAlias V2, then returns
// NoAlias / MustAlias. Otherwise, returns MayAlias.
- AliasResult Alias =
- aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
- SISize, SIAAInfo, UnderV2);
+ AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
+ SISize, SIAAInfo, AAQI, UnderV2);
if (Alias == MayAlias)
return MayAlias;
- AliasResult ThisAlias =
- aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo,
- UnderV2);
+ AliasResult ThisAlias = aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(),
+ SISize, SIAAInfo, AAQI, UnderV2);
return MergeAliasResults(ThisAlias, Alias);
}
const AAMDNodes &PNAAInfo, const Value *V2,
LocationSize V2Size,
const AAMDNodes &V2AAInfo,
- const Value *UnderV2) {
+ const Value *UnderV2, AAQueryInfo &AAQI) {
// Track phi nodes we have visited. We use this information when we determine
// value equivalence.
VisitedPhiBBs.insert(PN->getParent());
// on corresponding edges.
if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
if (PN2->getParent() == PN->getParent()) {
- LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
- MemoryLocation(V2, V2Size, V2AAInfo));
+ AAQueryInfo::LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
+ MemoryLocation(V2, V2Size, V2AAInfo));
if (PN > V2)
std::swap(Locs.first, Locs.second);
// Analyse the PHIs' inputs under the assumption that the PHIs are
AliasResult OrigAliasResult;
{
// Limited lifetime iterator invalidated by the aliasCheck call below.
- auto CacheIt = AliasCache.find(Locs);
- assert((CacheIt != AliasCache.end()) &&
+ auto CacheIt = AAQI.AliasCache.find(Locs);
+ assert((CacheIt != AAQI.AliasCache.end()) &&
"There must exist an entry for the phi node");
OrigAliasResult = CacheIt->second;
CacheIt->second = NoAlias;
AliasResult ThisAlias =
aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
- V2Size, V2AAInfo);
+ V2Size, V2AAInfo, AAQI);
Alias = MergeAliasResults(ThisAlias, Alias);
if (Alias == MayAlias)
break;
// Reset if speculation failed.
if (Alias != NoAlias) {
- auto Pair = AliasCache.insert(std::make_pair(Locs, OrigAliasResult));
+ auto Pair =
+ AAQI.AliasCache.insert(std::make_pair(Locs, OrigAliasResult));
assert(!Pair.second && "Entry must have existed");
Pair.first->second = OrigAliasResult;
}
if (isRecursive)
PNSize = LocationSize::unknown();
- AliasResult Alias =
- aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0],
- PNSize, PNAAInfo, UnderV2);
+ AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0], PNSize,
+ PNAAInfo, AAQI, UnderV2);
// Early exit if the check of the first PHI source against V2 is MayAlias.
// Other results are not possible.
Value *V = V1Srcs[i];
AliasResult ThisAlias =
- aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo, UnderV2);
+ aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo, AAQI, UnderV2);
Alias = MergeAliasResults(ThisAlias, Alias);
if (Alias == MayAlias)
break;
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
AAMDNodes V1AAInfo, const Value *V2,
LocationSize V2Size, AAMDNodes V2AAInfo,
- const Value *O1, const Value *O2) {
+ AAQueryInfo &AAQI, const Value *O1,
+ const Value *O2) {
// If either of the memory references is empty, it doesn't matter what the
// pointer values are.
if (V1Size.isZero() || V2Size.isZero())
// temporary store the nocapture argument's value in a temporary memory
// location if that memory location doesn't escape. Or it may pass a
// nocapture value to other functions as long as they don't capture it.
- if (isEscapeSource(O1) && isNonEscapingLocalObject(O2, &IsCapturedCache))
+ if (isEscapeSource(O1) &&
+ isNonEscapingLocalObject(O2, &AAQI.IsCapturedCache))
return NoAlias;
- if (isEscapeSource(O2) && isNonEscapingLocalObject(O1, &IsCapturedCache))
+ if (isEscapeSource(O2) &&
+ isNonEscapingLocalObject(O1, &AAQI.IsCapturedCache))
return NoAlias;
}
// Check the cache before climbing up use-def chains. This also terminates
// otherwise infinitely recursive queries.
- LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
- MemoryLocation(V2, V2Size, V2AAInfo));
+ AAQueryInfo::LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
+ MemoryLocation(V2, V2Size, V2AAInfo));
if (V1 > V2)
std::swap(Locs.first, Locs.second);
- std::pair<AliasCacheTy::iterator, bool> Pair =
- AliasCache.try_emplace(Locs, MayAlias);
+ std::pair<AAQueryInfo::AliasCacheT::iterator, bool> Pair =
+ AAQI.AliasCache.try_emplace(Locs, MayAlias);
if (!Pair.second)
return Pair.first->second;
}
if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
AliasResult Result =
- aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
- if (Result != MayAlias)
- return AliasCache[Locs] = Result;
+ aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2, AAQI);
+ if (Result != MayAlias) {
+ auto ItInsPair = AAQI.AliasCache.insert(std::make_pair(Locs, Result));
+ assert(!ItInsPair.second && "Entry must have existed");
+ ItInsPair.first->second = Result;
+ return Result;
+ }
}
if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
std::swap(V1AAInfo, V2AAInfo);
}
if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
- AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo,
- V2, V2Size, V2AAInfo, O2);
+ AliasResult Result =
+ aliasPHI(PN, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2, AAQI);
if (Result != MayAlias) {
- Pair = AliasCache.try_emplace(Locs, Result);
+ Pair = AAQI.AliasCache.try_emplace(Locs, Result);
assert(!Pair.second && "Entry must have existed");
return Pair.first->second = Result;
}
}
if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
AliasResult Result =
- aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2);
+ aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2, AAQI);
if (Result != MayAlias) {
- Pair = AliasCache.try_emplace(Locs, Result);
+ Pair = AAQI.AliasCache.try_emplace(Locs, Result);
assert(!Pair.second && "Entry must have existed");
return Pair.first->second = Result;
}
if (V1Size.isPrecise() && V2Size.isPrecise() &&
(isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation))) {
- Pair = AliasCache.try_emplace(Locs, PartialAlias);
+ Pair = AAQI.AliasCache.try_emplace(Locs, PartialAlias);
assert(!Pair.second && "Entry must have existed");
return Pair.first->second = PartialAlias;
}
// Recurse back into the best AA results we have, potentially with refined
// memory locations. We have already ensured that BasicAA has a MayAlias
// cache result for these, so any recursion back into BasicAA won't loop.
- AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second);
- Pair = AliasCache.try_emplace(Locs, Result);
+ AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second, AAQI);
+ Pair = AAQI.AliasCache.try_emplace(Locs, Result);
assert(!Pair.second && "Entry must have existed");
return Pair.first->second = Result;
}
}
AliasResult CFLAndersAAResult::alias(const MemoryLocation &LocA,
- const MemoryLocation &LocB) {
+ const MemoryLocation &LocB,
+ AAQueryInfo &AAQI) {
if (LocA.Ptr == LocB.Ptr)
return MustAlias;
// ConstantExpr, but every query needs to have at least one Value tied to a
// Function, and neither GlobalValues nor ConstantExprs are.
if (isa<Constant>(LocA.Ptr) && isa<Constant>(LocB.Ptr))
- return AAResultBase::alias(LocA, LocB);
+ return AAResultBase::alias(LocA, LocB, AAQI);
AliasResult QueryResult = query(LocA, LocB);
if (QueryResult == MayAlias)
- return AAResultBase::alias(LocA, LocB);
+ return AAResultBase::alias(LocA, LocB, AAQI);
return QueryResult;
}
/// other is some random pointer, we know there cannot be an alias, because the
/// address of the global isn't taken.
AliasResult GlobalsAAResult::alias(const MemoryLocation &LocA,
- const MemoryLocation &LocB) {
+ const MemoryLocation &LocB,
+ AAQueryInfo &AAQI) {
// Get the base object these pointers point to.
const Value *UV1 = GetUnderlyingObject(LocA.Ptr, DL);
const Value *UV2 = GetUnderlyingObject(LocB.Ptr, DL);
if ((GV1 || GV2) && GV1 != GV2)
return NoAlias;
- return AAResultBase::alias(LocA, LocB);
+ return AAResultBase::alias(LocA, LocB, AAQI);
}
ModRefInfo GlobalsAAResult::getModRefInfoForArgument(const CallBase *Call,
- const GlobalValue *GV) {
+ const GlobalValue *GV,
+ AAQueryInfo &AAQI) {
if (Call->doesNotAccessMemory())
return ModRefInfo::NoModRef;
ModRefInfo ConservativeResult =
if (!all_of(Objects, isIdentifiedObject) &&
// Try ::alias to see if all objects are known not to alias GV.
!all_of(Objects, [&](Value *V) {
- return this->alias(MemoryLocation(V), MemoryLocation(GV)) == NoAlias;
+ return this->alias(MemoryLocation(V), MemoryLocation(GV), AAQI) ==
+ NoAlias;
}))
return ConservativeResult;
}
ModRefInfo GlobalsAAResult::getModRefInfo(const CallBase *Call,
- const MemoryLocation &Loc) {
+ const MemoryLocation &Loc,
+ AAQueryInfo &AAQI) {
ModRefInfo Known = ModRefInfo::ModRef;
// If we are asking for mod/ref info of a direct call with a pointer to a
if (NonAddressTakenGlobals.count(GV))
if (const FunctionInfo *FI = getFunctionInfo(F))
Known = unionModRef(FI->getModRefInfoForGlobal(*GV),
- getModRefInfoForArgument(Call, GV));
+ getModRefInfoForArgument(Call, GV, AAQI));
if (!isModOrRefSet(Known))
return ModRefInfo::NoModRef; // No need to query other mod/ref analyses
- return intersectModRef(Known, AAResultBase::getModRefInfo(Call, Loc));
+ return intersectModRef(Known, AAResultBase::getModRefInfo(Call, Loc, AAQI));
}
GlobalsAAResult::GlobalsAAResult(const DataLayout &DL,
// Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
// ignored if IsClobber = false.
-static ClobberAlias instructionClobbersQuery(const MemoryDef *MD,
- const MemoryLocation &UseLoc,
- const Instruction *UseInst,
- AliasAnalysis &AA) {
+template <typename AliasAnalysisType>
+static ClobberAlias
+instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
+ const Instruction *UseInst, AliasAnalysisType &AA) {
Instruction *DefInst = MD->getMemoryInst();
assert(DefInst && "Defining instruction not actually an instruction");
const auto *UseCall = dyn_cast<CallBase>(UseInst);
return {isModSet(I), AR};
}
+template <typename AliasAnalysisType>
static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
const MemoryUseOrDef *MU,
const MemoryLocOrCall &UseMLOC,
- AliasAnalysis &AA) {
+ AliasAnalysisType &AA) {
// FIXME: This is a temporary hack to allow a single instructionClobbersQuery
// to exist while MemoryLocOrCall is pushed through places.
if (UseMLOC.IsCall)
} // end anonymous namespace
static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
- AliasAnalysis &AA) {
+ BatchAAResults &AA) {
Instruction *Inst = MD->getMemoryInst();
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
switch (II->getIntrinsicID()) {
case Intrinsic::lifetime_end:
- return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
+ return AA.alias(MemoryLocation(II->getArgOperand(1)), Loc) == MustAlias;
default:
return false;
}
return false;
}
-static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
+template <typename AliasAnalysisType>
+static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
const Instruction *I) {
// If the memory can't be changed, then loads of the memory can't be
// clobbered.
return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
- AA.pointsToConstantMemory(cast<LoadInst>(I)->
- getPointerOperand()));
+ AA.pointsToConstantMemory(MemoryLocation(
+ cast<LoadInst>(I)->getPointerOperand())));
}
/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// \param Query The UpwardsMemoryQuery we used for our search.
/// \param AA The AliasAnalysis we used for our search.
/// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
+
+template <typename AliasAnalysisType>
LLVM_ATTRIBUTE_UNUSED static void
checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
const MemoryLocation &StartLoc, const MemorySSA &MSSA,
- const UpwardsMemoryQuery &Query, AliasAnalysis &AA,
+ const UpwardsMemoryQuery &Query, AliasAnalysisType &AA,
bool AllowImpreciseClobber = false) {
assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");
/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
/// in one class.
-class ClobberWalker {
+template <class AliasAnalysisType> class ClobberWalker {
/// Save a few bytes by using unsigned instead of size_t.
using ListIndex = unsigned;
};
const MemorySSA &MSSA;
- AliasAnalysis &AA;
+ AliasAnalysisType &AA;
DominatorTree &DT;
UpwardsMemoryQuery *Query;
}
public:
- ClobberWalker(const MemorySSA &MSSA, AliasAnalysis &AA, DominatorTree &DT)
+ ClobberWalker(const MemorySSA &MSSA, AliasAnalysisType &AA, DominatorTree &DT)
: MSSA(MSSA), AA(AA), DT(DT) {}
+ AliasAnalysisType *getAA() { return &AA; }
/// Finds the nearest clobber for the given query, optimizing phis if
/// possible.
MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q) {
namespace llvm {
-class MemorySSA::ClobberWalkerBase {
- ClobberWalker Walker;
+template <class AliasAnalysisType> class MemorySSA::ClobberWalkerBase {
+ ClobberWalker<AliasAnalysisType> Walker;
MemorySSA *MSSA;
public:
- ClobberWalkerBase(MemorySSA *M, AliasAnalysis *A, DominatorTree *D)
+ ClobberWalkerBase(MemorySSA *M, AliasAnalysisType *A, DominatorTree *D)
: Walker(*M, *A, *D), MSSA(M) {}
MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
/// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
/// longer does caching on its own, but the name has been retained for the
/// moment.
+template <class AliasAnalysisType>
class MemorySSA::CachingWalker final : public MemorySSAWalker {
- ClobberWalkerBase *Walker;
+ ClobberWalkerBase<AliasAnalysisType> *Walker;
public:
- CachingWalker(MemorySSA *M, ClobberWalkerBase *W)
+ CachingWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
: MemorySSAWalker(M), Walker(W) {}
~CachingWalker() override = default;
using MemorySSAWalker::getClobberingMemoryAccess;
- MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override;
+ MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
+ return Walker->getClobberingMemoryAccessBase(MA, false);
+ }
MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
- const MemoryLocation &Loc) override;
+ const MemoryLocation &Loc) override {
+ return Walker->getClobberingMemoryAccessBase(MA, Loc);
+ }
void invalidateInfo(MemoryAccess *MA) override {
if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
}
};
+template <class AliasAnalysisType>
class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
- ClobberWalkerBase *Walker;
+ ClobberWalkerBase<AliasAnalysisType> *Walker;
public:
- SkipSelfWalker(MemorySSA *M, ClobberWalkerBase *W)
+ SkipSelfWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
: MemorySSAWalker(M), Walker(W) {}
~SkipSelfWalker() override = default;
using MemorySSAWalker::getClobberingMemoryAccess;
- MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override;
+ MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
+ return Walker->getClobberingMemoryAccessBase(MA, true);
+ }
MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
- const MemoryLocation &Loc) override;
+ const MemoryLocation &Loc) override {
+ return Walker->getClobberingMemoryAccessBase(MA, Loc);
+ }
void invalidateInfo(MemoryAccess *MA) override {
if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
}
MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
- : AA(AA), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
+ : AA(nullptr), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
SkipWalker(nullptr), NextID(0) {
- buildMemorySSA();
+  // Build MemorySSA using a batch alias analysis. This reuses the internal
+  // state that AA collects during an alias()/getModRefInfo() call. This is
+  // safe because there are no CFG changes while building MemorySSA, and
+  // batching the queries can significantly reduce the time the compiler
+  // spends in AA, since we will make queries about all the instructions in
+  // the Function.
+ BatchAAResults BatchAA(*AA);
+ buildMemorySSA(BatchAA);
+  // Intentionally leave AA as nullptr while building so we don't accidentally
+  // use the non-batch AliasAnalysis.
+ this->AA = AA;
+ // Also create the walker here.
+ getWalker();
}
MemorySSA::~MemorySSA() {
/// which is walking bottom-up.
class MemorySSA::OptimizeUses {
public:
- OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, AliasAnalysis *AA,
+ OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, BatchAAResults *BAA,
DominatorTree *DT)
- : MSSA(MSSA), Walker(Walker), AA(AA), DT(DT) {}
+ : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}
void optimizeUses();
MemorySSA *MSSA;
MemorySSAWalker *Walker;
- AliasAnalysis *AA;
+ BatchAAResults *AA;
DominatorTree *DT;
};
createMemoryPhi(BB);
}
-void MemorySSA::buildMemorySSA() {
+void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
// We create an access to represent "live on entry", for things like
// arguments or users of globals, where the memory they use is defined before
// the beginning of the function. We do not actually insert it into the IR.
AccessList *Accesses = nullptr;
DefsList *Defs = nullptr;
for (Instruction &I : B) {
- MemoryUseOrDef *MUD = createNewAccess(&I);
+ MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
if (!MUD)
continue;
SmallPtrSet<BasicBlock *, 16> Visited;
renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
- CachingWalker *Walker = getWalkerImpl();
-
- OptimizeUses(this, Walker, AA, DT).optimizeUses();
+ ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BAA, DT);
+ CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
+ OptimizeUses(this, &WalkerLocal, &BAA, DT).optimizeUses();
// Mark the uses in unreachable blocks as live on entry, so that they go
// somewhere.
MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
-MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {
+MemorySSA::CachingWalker<AliasAnalysis> *MemorySSA::getWalkerImpl() {
if (Walker)
return Walker.get();
if (!WalkerBase)
- WalkerBase = llvm::make_unique<ClobberWalkerBase>(this, AA, DT);
+ WalkerBase =
+ llvm::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
- Walker = llvm::make_unique<CachingWalker>(this, WalkerBase.get());
+ Walker =
+ llvm::make_unique<CachingWalker<AliasAnalysis>>(this, WalkerBase.get());
return Walker.get();
}
return SkipWalker.get();
if (!WalkerBase)
- WalkerBase = llvm::make_unique<ClobberWalkerBase>(this, AA, DT);
+ WalkerBase =
+ llvm::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
- SkipWalker = llvm::make_unique<SkipSelfWalker>(this, WalkerBase.get());
+ SkipWalker =
+ llvm::make_unique<SkipSelfWalker<AliasAnalysis>>(this, WalkerBase.get());
return SkipWalker.get();
}
MemoryAccess *Definition,
const MemoryUseOrDef *Template) {
assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
- MemoryUseOrDef *NewAccess = createNewAccess(I, Template);
+ MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
assert(
NewAccess != nullptr &&
"Tried to create a memory access for a non-memory touching instruction");
}
/// Helper function to create new memory accesses
+template <typename AliasAnalysisType>
MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
+ AliasAnalysisType *AAP,
const MemoryUseOrDef *Template) {
// The assume intrinsic has a control dependency which we model by claiming
// that it writes arbitrarily. Ignore that fake memory dependency here.
Def = dyn_cast_or_null<MemoryDef>(Template) != nullptr;
Use = dyn_cast_or_null<MemoryUse>(Template) != nullptr;
#if !defined(NDEBUG)
- ModRefInfo ModRef = AA->getModRefInfo(I, None);
+ ModRefInfo ModRef = AAP->getModRefInfo(I, None);
bool DefCheck, UseCheck;
DefCheck = isModSet(ModRef) || isOrdered(I);
UseCheck = isRefSet(ModRef);
#endif
} else {
    // Find out what effect this instruction has on memory.
- ModRefInfo ModRef = AA->getModRefInfo(I, None);
+ ModRefInfo ModRef = AAP->getModRefInfo(I, None);
// The isOrdered check is used to ensure that volatiles end up as defs
// (atomics end up as ModRef right now anyway). Until we separate the
// ordering chain from the memory chain, this enables people to see at least
MUD->setDefiningAccess(nullptr);
// Invalidate our walker's cache if necessary
if (!isa<MemoryUse>(MA))
- Walker->invalidateInfo(MA);
+ getWalker()->invalidateInfo(MA);
Value *MemoryInst;
if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
/// the MemoryAccess that actually clobbers Loc.
///
/// \returns our clobbering memory access
-MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
+template <typename AliasAnalysisType>
+MemoryAccess *
+MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
MemoryAccess *StartingAccess, const MemoryLocation &Loc) {
if (isa<MemoryPhi>(StartingAccess))
return StartingAccess;
return Clobber;
}
+template <typename AliasAnalysisType>
MemoryAccess *
-MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(MemoryAccess *MA,
- bool SkipSelf) {
+MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
+ MemoryAccess *MA, bool SkipSelf) {
auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
// If this is a MemoryPhi, we can't do anything.
if (!StartingAccess)
UpwardsMemoryQuery Q(I, StartingAccess);
- if (isUseTriviallyOptimizableToLiveOnEntry(*MSSA->AA, I)) {
+ if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) {
MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
StartingAccess->setOptimized(LiveOnEntry);
StartingAccess->setOptimizedAccessType(None);
}
MemoryAccess *
-MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
- return Walker->getClobberingMemoryAccessBase(MA, false);
-}
-
-MemoryAccess *
-MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA,
- const MemoryLocation &Loc) {
- return Walker->getClobberingMemoryAccessBase(MA, Loc);
-}
-
-MemoryAccess *
-MemorySSA::SkipSelfWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
- return Walker->getClobberingMemoryAccessBase(MA, true);
-}
-
-MemoryAccess *
-MemorySSA::SkipSelfWalker::getClobberingMemoryAccess(MemoryAccess *MA,
- const MemoryLocation &Loc) {
- return Walker->getClobberingMemoryAccessBase(MA, Loc);
-}
-
-MemoryAccess *
DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
return Use->getDefiningAccess();
using namespace llvm::objcarc;
AliasResult ObjCARCAAResult::alias(const MemoryLocation &LocA,
- const MemoryLocation &LocB) {
+ const MemoryLocation &LocB,
+ AAQueryInfo &AAQI) {
if (!EnableARCOpts)
- return AAResultBase::alias(LocA, LocB);
+ return AAResultBase::alias(LocA, LocB, AAQI);
// First, strip off no-ops, including ObjC-specific no-ops, and try making a
// precise alias query.
const Value *SB = GetRCIdentityRoot(LocB.Ptr);
AliasResult Result =
AAResultBase::alias(MemoryLocation(SA, LocA.Size, LocA.AATags),
- MemoryLocation(SB, LocB.Size, LocB.AATags));
+ MemoryLocation(SB, LocB.Size, LocB.AATags), AAQI);
if (Result != MayAlias)
return Result;
const Value *UA = GetUnderlyingObjCPtr(SA, DL);
const Value *UB = GetUnderlyingObjCPtr(SB, DL);
if (UA != SA || UB != SB) {
- Result = AAResultBase::alias(MemoryLocation(UA), MemoryLocation(UB));
+ Result = AAResultBase::alias(MemoryLocation(UA), MemoryLocation(UB), AAQI);
// We can't use MustAlias or PartialAlias results here because
// GetUnderlyingObjCPtr may return an offsetted pointer value.
if (Result == NoAlias)
}
bool ObjCARCAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
- bool OrLocal) {
+ AAQueryInfo &AAQI, bool OrLocal) {
if (!EnableARCOpts)
- return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
+ return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
// First, strip off no-ops, including ObjC-specific no-ops, and try making
// a precise alias query.
const Value *S = GetRCIdentityRoot(Loc.Ptr);
if (AAResultBase::pointsToConstantMemory(
- MemoryLocation(S, Loc.Size, Loc.AATags), OrLocal))
+ MemoryLocation(S, Loc.Size, Loc.AATags), AAQI, OrLocal))
return true;
// If that failed, climb to the underlying object, including climbing through
// ObjC-specific no-ops, and try making an imprecise alias query.
const Value *U = GetUnderlyingObjCPtr(S, DL);
if (U != S)
- return AAResultBase::pointsToConstantMemory(MemoryLocation(U), OrLocal);
+ return AAResultBase::pointsToConstantMemory(MemoryLocation(U), AAQI,
+ OrLocal);
// If that failed, fail. We don't need to chain here, since that's covered
// by the earlier precise query.
}
ModRefInfo ObjCARCAAResult::getModRefInfo(const CallBase *Call,
- const MemoryLocation &Loc) {
+ const MemoryLocation &Loc,
+ AAQueryInfo &AAQI) {
if (!EnableARCOpts)
- return AAResultBase::getModRefInfo(Call, Loc);
+ return AAResultBase::getModRefInfo(Call, Loc, AAQI);
switch (GetBasicARCInstKind(Call)) {
case ARCInstKind::Retain:
break;
}
- return AAResultBase::getModRefInfo(Call, Loc);
+ return AAResultBase::getModRefInfo(Call, Loc, AAQI);
}
ObjCARCAAResult ObjCARCAA::run(Function &F, FunctionAnalysisManager &AM) {
using namespace llvm;
AliasResult SCEVAAResult::alias(const MemoryLocation &LocA,
- const MemoryLocation &LocB) {
+ const MemoryLocation &LocB, AAQueryInfo &AAQI) {
// If either of the memory references is empty, it doesn't matter what the
// pointer values are. This allows the code below to ignore this special
// case.
AO ? AAMDNodes() : LocA.AATags),
MemoryLocation(BO ? BO : LocB.Ptr,
BO ? LocationSize::unknown() : LocB.Size,
- BO ? AAMDNodes() : LocB.AATags)) == NoAlias)
+ BO ? AAMDNodes() : LocB.AATags),
+ AAQI) == NoAlias)
return NoAlias;
// Forward the query to the next analysis.
- return AAResultBase::alias(LocA, LocB);
+ return AAResultBase::alias(LocA, LocB, AAQI);
}
/// Given an expression, try to find a base value.
} // end anonymous namespace
AliasResult ScopedNoAliasAAResult::alias(const MemoryLocation &LocA,
- const MemoryLocation &LocB) {
+ const MemoryLocation &LocB,
+ AAQueryInfo &AAQI) {
if (!EnableScopedNoAlias)
- return AAResultBase::alias(LocA, LocB);
+ return AAResultBase::alias(LocA, LocB, AAQI);
// Get the attached MDNodes.
const MDNode *AScopes = LocA.AATags.Scope, *BScopes = LocB.AATags.Scope;
return NoAlias;
// If they may alias, chain to the next AliasAnalysis.
- return AAResultBase::alias(LocA, LocB);
+ return AAResultBase::alias(LocA, LocB, AAQI);
}
ModRefInfo ScopedNoAliasAAResult::getModRefInfo(const CallBase *Call,
- const MemoryLocation &Loc) {
+ const MemoryLocation &Loc,
+ AAQueryInfo &AAQI) {
if (!EnableScopedNoAlias)
- return AAResultBase::getModRefInfo(Call, Loc);
+ return AAResultBase::getModRefInfo(Call, Loc, AAQI);
if (!mayAliasInScopes(Loc.AATags.Scope,
Call->getMetadata(LLVMContext::MD_noalias)))
Loc.AATags.NoAlias))
return ModRefInfo::NoModRef;
- return AAResultBase::getModRefInfo(Call, Loc);
+ return AAResultBase::getModRefInfo(Call, Loc, AAQI);
}
ModRefInfo ScopedNoAliasAAResult::getModRefInfo(const CallBase *Call1,
- const CallBase *Call2) {
+ const CallBase *Call2,
+ AAQueryInfo &AAQI) {
if (!EnableScopedNoAlias)
- return AAResultBase::getModRefInfo(Call1, Call2);
+ return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
if (!mayAliasInScopes(Call1->getMetadata(LLVMContext::MD_alias_scope),
Call2->getMetadata(LLVMContext::MD_noalias)))
Call1->getMetadata(LLVMContext::MD_noalias)))
return ModRefInfo::NoModRef;
- return AAResultBase::getModRefInfo(Call1, Call2);
+ return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
}
static void collectMDInDomain(const MDNode *List, const MDNode *Domain,
}
AliasResult TypeBasedAAResult::alias(const MemoryLocation &LocA,
- const MemoryLocation &LocB) {
+ const MemoryLocation &LocB,
+ AAQueryInfo &AAQI) {
if (!EnableTBAA)
- return AAResultBase::alias(LocA, LocB);
+ return AAResultBase::alias(LocA, LocB, AAQI);
// If accesses may alias, chain to the next AliasAnalysis.
if (Aliases(LocA.AATags.TBAA, LocB.AATags.TBAA))
- return AAResultBase::alias(LocA, LocB);
+ return AAResultBase::alias(LocA, LocB, AAQI);
// Otherwise return a definitive result.
return NoAlias;
}
bool TypeBasedAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
+ AAQueryInfo &AAQI,
bool OrLocal) {
if (!EnableTBAA)
- return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
+ return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
const MDNode *M = Loc.AATags.TBAA;
if (!M)
- return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
+ return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
// If this is an "immutable" type, we can assume the pointer is pointing
// to constant memory.
(isStructPathTBAA(M) && TBAAStructTagNode(M).isTypeImmutable()))
return true;
- return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
+ return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
}
FunctionModRefBehavior
}
ModRefInfo TypeBasedAAResult::getModRefInfo(const CallBase *Call,
- const MemoryLocation &Loc) {
+ const MemoryLocation &Loc,
+ AAQueryInfo &AAQI) {
if (!EnableTBAA)
- return AAResultBase::getModRefInfo(Call, Loc);
+ return AAResultBase::getModRefInfo(Call, Loc, AAQI);
if (const MDNode *L = Loc.AATags.TBAA)
if (const MDNode *M = Call->getMetadata(LLVMContext::MD_tbaa))
if (!Aliases(L, M))
return ModRefInfo::NoModRef;
- return AAResultBase::getModRefInfo(Call, Loc);
+ return AAResultBase::getModRefInfo(Call, Loc, AAQI);
}
ModRefInfo TypeBasedAAResult::getModRefInfo(const CallBase *Call1,
- const CallBase *Call2) {
+ const CallBase *Call2,
+ AAQueryInfo &AAQI) {
if (!EnableTBAA)
- return AAResultBase::getModRefInfo(Call1, Call2);
+ return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
if (const MDNode *M1 = Call1->getMetadata(LLVMContext::MD_tbaa))
if (const MDNode *M2 = Call2->getMetadata(LLVMContext::MD_tbaa))
if (!Aliases(M1, M2))
return ModRefInfo::NoModRef;
- return AAResultBase::getModRefInfo(Call1, Call2);
+ return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
}
bool MDNode::isTBAAVtableAccess() const {
}
AliasResult AMDGPUAAResult::alias(const MemoryLocation &LocA,
- const MemoryLocation &LocB) {
+ const MemoryLocation &LocB,
+ AAQueryInfo &AAQI) {
unsigned asA = LocA.Ptr->getType()->getPointerAddressSpace();
unsigned asB = LocB.Ptr->getType()->getPointerAddressSpace();
return Result;
// Forward the query to the next alias analysis.
- return AAResultBase::alias(LocA, LocB);
+ return AAResultBase::alias(LocA, LocB, AAQI);
}
bool AMDGPUAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
- bool OrLocal) {
+ AAQueryInfo &AAQI, bool OrLocal) {
const Value *Base = GetUnderlyingObject(Loc.Ptr, DL);
unsigned AS = Base->getType()->getPointerAddressSpace();
if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
// Only assume constant memory for arguments on kernels.
switch (F->getCallingConv()) {
default:
- return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
+ return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
case CallingConv::AMDGPU_LS:
case CallingConv::AMDGPU_HS:
case CallingConv::AMDGPU_ES:
return true;
}
}
- return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
+ return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
}
/// By definition, this result is stateless and so remains valid.
bool invalidate(Function &, const PreservedAnalyses &) { return false; }
- AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
- bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal);
+ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
+ AAQueryInfo &AAQI);
+ bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI,
+ bool OrLocal);
private:
bool Aliases(const MDNode *A, const MDNode *B) const;
bool invalidate(Function &, const PreservedAnalyses &) { return false; }
- AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
+ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
+ AAQueryInfo &AAQI) {
CB();
return MayAlias;
}
DominatorTree DT;
AssumptionCache AC;
BasicAAResult BAA;
+ AAQueryInfo AAQI;
TestAnalyses(BasicAATest &Test)
- : DT(*Test.F), AC(*Test.F), BAA(Test.DL, *Test.F, Test.TLI, AC, &DT) {}
+ : DT(*Test.F), AC(*Test.F), BAA(Test.DL, *Test.F, Test.TLI, AC, &DT),
+ AAQI() {}
};
llvm::Optional<TestAnalyses> Analyses;
- BasicAAResult &setupAnalyses() {
+ TestAnalyses &setupAnalyses() {
assert(F);
Analyses.emplace(*this);
- return Analyses->BAA;
+ return Analyses.getValue();
}
public:
GlobalPtr->setLinkage(GlobalValue::LinkageTypes::InternalLinkage);
GlobalPtr->setInitializer(B.getInt8(0));
- BasicAAResult &BasicAA = setupAnalyses();
+ auto &AllAnalyses = setupAnalyses();
+ BasicAAResult &BasicAA = AllAnalyses.BAA;
+ AAQueryInfo &AAQI = AllAnalyses.AAQI;
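+ // The fixture's AAQueryInfo is passed explicitly to each BasicAA query
+ // below; with precise location sizes the query is expected to prove
+ // NoAlias, while an upper-bound size only yields the conservative MayAlias.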
ASSERT_EQ(
BasicAA.alias(MemoryLocation(IncomingI32Ptr, LocationSize::precise(4)),
- MemoryLocation(GlobalPtr, LocationSize::precise(1))),
+ MemoryLocation(GlobalPtr, LocationSize::precise(1)), AAQI),
AliasResult::NoAlias);
ASSERT_EQ(
BasicAA.alias(MemoryLocation(IncomingI32Ptr, LocationSize::upperBound(4)),
- MemoryLocation(GlobalPtr, LocationSize::precise(1))),
+ MemoryLocation(GlobalPtr, LocationSize::precise(1)), AAQI),
AliasResult::MayAlias);
}
auto *I8AtUncertainOffset =
cast<GetElementPtrInst>(B.CreateGEP(B.getInt8Ty(), I8, ArbitraryI32));
- BasicAAResult &BasicAA = setupAnalyses();
+ auto &AllAnalyses = setupAnalyses();
+ BasicAAResult &BasicAA = AllAnalyses.BAA;
+ AAQueryInfo &AAQI = AllAnalyses.AAQI;
ASSERT_EQ(BasicAA.alias(
MemoryLocation(I8, LocationSize::precise(2)),
- MemoryLocation(I8AtUncertainOffset, LocationSize::precise(1))),
+ MemoryLocation(I8AtUncertainOffset, LocationSize::precise(1)),
+ AAQI),
AliasResult::PartialAlias);
ASSERT_EQ(BasicAA.alias(
MemoryLocation(I8, LocationSize::upperBound(2)),
- MemoryLocation(I8AtUncertainOffset, LocationSize::precise(1))),
+ MemoryLocation(I8AtUncertainOffset, LocationSize::precise(1)),
+ AAQI),
AliasResult::MayAlias);
}