/// Check whether the dependencies between the accesses are safe.
///
/// Only checks sets with elements in \p CheckDeps.
bool areDepsSafe(DepCandidates &AccessSets, MemAccessInfoList &CheckDeps,
- const ValueToValueMap &Strides);
+ const DenseMap<Value *, const SCEV *> &Strides);
/// Check whether there is a plausible dependence between accesses \p A
/// and \p B. Returns NoDep if no memory dependence was encountered that
/// would inhibit vectorization; otherwise returns the type of the
/// possible dependence.
Dependence::DepType isDependent(const MemAccessInfo &A, unsigned AIdx,
const MemAccessInfo &B, unsigned BIdx,
- const ValueToValueMap &Strides);
+ const DenseMap<Value *, const SCEV *> &Strides);
/// Check whether the data dependence could prevent store-load
/// forwarding.
/// If an access has a symbolic stride, this maps the pointer value to
/// the stride symbol.
- const ValueToValueMap &getSymbolicStrides() const { return SymbolicStrides; }
+ const DenseMap<Value *, const SCEV *> &getSymbolicStrides() const {
+ return SymbolicStrides;
+ }
/// Pointer has a symbolic stride.
bool hasStride(Value *V) const { return StrideSet.count(V); }
/// If an access has a symbolic stride, this maps the pointer value to
/// the stride symbol.
- ValueToValueMap SymbolicStrides;
+ DenseMap<Value *, const SCEV *> SymbolicStrides;
/// Set of symbolic stride values.
SmallPtrSet<Value *, 8> StrideSet;
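For context, a minimal sketch of the kind of source loop these members describe; the function and all names below are illustrative, not part of the patch:

#include <cstdint>

// Illustrative only: the access stride is unknown until runtime, so LAA
// cannot prove the store consecutive. collectStridedAccess would map the
// store's pointer operand to the stride in SymbolicStrides and add the
// stride value to StrideSet, letting the loop be versioned under the
// speculated predicate Stride == 1.
void scale(float *A, int64_t Stride, int64_t N) {
  for (int64_t I = 0; I < N; ++I)
    A[I * Stride] *= 2.0f;
}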
/// Return the SCEV corresponding to a pointer with the symbolic stride
/// replaced with constant one, assuming the SCEV predicate associated with
/// \p PSE is true.
///
/// \p PtrToStride provides the mapping between the pointer value and its
/// stride as collected by LoopVectorizationLegality::collectStridedAccess.
-const SCEV *replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
- const ValueToValueMap &PtrToStride,
- Value *Ptr);
+const SCEV *
+replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
+ const DenseMap<Value *, const SCEV *> &PtrToStride,
+ Value *Ptr);
/// If the pointer has a constant stride return it in units of the access type
/// size. Otherwise return std::nullopt.
std::optional<int64_t>
getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr,
const Loop *Lp,
- const ValueToValueMap &StridesMap = ValueToValueMap(),
+             const DenseMap<Value *, const SCEV *> &StridesMap =
+                 DenseMap<Value *, const SCEV *>(),
bool Assume = false, bool ShouldCheckWrap = true);
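A hedged sketch of a call site under the new signature; `PSE`, `AccessTy`, `Ptr`, `TheLoop`, and `LAI` are assumed to be in scope and are not taken from the patch:

// The StridesMap argument now carries SCEVs rather than Values.
const DenseMap<Value *, const SCEV *> &Strides = LAI.getSymbolicStrides();
if (std::optional<int64_t> Stride =
        getPtrStride(PSE, AccessTy, Ptr, TheLoop, Strides))
  if (*Stride == 1)
    ; // Consecutive access once the stride predicate is assumed.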
/// Returns the distance between the pointers \p PtrA and \p PtrB iff they are
/// compatible and it is possible to calculate the distance between them.
}
-const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
-                                            const ValueToValueMap &PtrToStride,
-                                            Value *Ptr) {
+const SCEV *llvm::replaceSymbolicStrideSCEV(
+    PredicatedScalarEvolution &PSE,
+    const DenseMap<Value *, const SCEV *> &PtrToStride, Value *Ptr) {
const SCEV *OrigSCEV = PSE.getSCEV(Ptr);
// If there is an entry in the map return the SCEV of the pointer with the
// symbolic stride replaced by one.
- ValueToValueMap::const_iterator SI = PtrToStride.find(Ptr);
+  auto SI = PtrToStride.find(Ptr);
if (SI == PtrToStride.end())
// For a non-symbolic stride, just return the original expression.
return OrigSCEV;
- Value *StrideVal = stripIntegerCast(SI->second);
-
- ScalarEvolution *SE = PSE.getSE();
- const SCEV *StrideSCEV = SE->getSCEV(StrideVal);
+ const SCEV *StrideSCEV = SI->second;
+ // Note: This assert is both overly strong and overly weak. The actual
+ // invariant here is that StrideSCEV should be loop invariant. The only
+ // such invariant strides we happen to speculate right now are unknowns
+ // and thus this is a reasonable proxy of the actual invariant.
assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");
+ ScalarEvolution *SE = PSE.getSE();
const auto *CT = SE->getOne(StrideSCEV->getType());
PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV, CT));
auto *Expr = PSE.getSCEV(Ptr);
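A worked sketch of the rewrite, assuming the illustrative `scale` loop above (float elements); `SymbolicStrides` here stands for the collected map:

// The SCEV for &A[I * Stride] is initially {%A,+,(4 * %Stride)}<%loop>.
// The map lookup yields the SCEVUnknown %Stride, the predicate
// %Stride == 1 is added to PSE, and PSE.getSCEV(Ptr) then folds the
// recurrence to {%A,+,4}<%loop>, a unit-stride access.
const SCEV *NewSCEV = replaceSymbolicStrideSCEV(PSE, SymbolicStrides, Ptr);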
/// Check whether a pointer can participate in a runtime bounds check.
/// If \p Assume, try harder to prove that we can compute the bounds of
/// the pointer.
bool createCheckForAccess(RuntimePointerChecking &RtCheck,
MemAccessInfo Access, Type *AccessTy,
- const ValueToValueMap &Strides,
+ const DenseMap<Value *, const SCEV *> &Strides,
DenseMap<Value *, unsigned> &DepSetId,
Loop *TheLoop, unsigned &RunningDepId,
unsigned ASId, bool ShouldCheckStride, bool Assume);
/// Returns true if we need no check or if we do and we can generate them
/// (i.e. the pointers have computable bounds).
bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
- Loop *TheLoop, const ValueToValueMap &Strides,
+                     Loop *TheLoop,
+                     const DenseMap<Value *, const SCEV *> &Strides,
Value *&UncomputablePtr, bool ShouldCheckWrap = false);
/// Goes over all memory accesses, checks whether an RT check is needed,
/// and builds sets of dependency check candidates.
/// Check whether a pointer address cannot wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE,
-                     const ValueToValueMap &Strides, Value *Ptr, Type *AccessTy,
-                     Loop *L) {
+                     const DenseMap<Value *, const SCEV *> &Strides,
+                     Value *Ptr, Type *AccessTy, Loop *L) {
const SCEV *PtrScev = PSE.getSCEV(Ptr);
if (PSE.getSE()->isLoopInvariant(PtrScev, L))
static SmallVector<PointerIntPair<const SCEV *, 1, bool>>
findForkedPointer(PredicatedScalarEvolution &PSE,
-                  const ValueToValueMap &StridesMap, Value *Ptr,
-                  const Loop *L) {
+                  const DenseMap<Value *, const SCEV *> &StridesMap,
+                  Value *Ptr, const Loop *L) {
ScalarEvolution *SE = PSE.getSE();
assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
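For reference, the access shape findForkedPointer handles is one whose address can take either of two forms; a small illustrative fragment (all names assumed):

// Illustrative forked pointer: the address selects between two bases, so
// the analysis returns one SCEV per possible base, each paired with a
// flag LAA uses to track whether the value needs freezing before it is
// used in runtime checks.
float *P = Cond ? &A[I] : &B[I];
float V = *P;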
-bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
-                                          MemAccessInfo Access, Type *AccessTy,
-                                          const ValueToValueMap &StridesMap,
-                                          DenseMap<Value *, unsigned> &DepSetId,
-                                          Loop *TheLoop, unsigned &RunningDepId,
-                                          unsigned ASId, bool ShouldCheckWrap,
+bool AccessAnalysis::createCheckForAccess(
+    RuntimePointerChecking &RtCheck, MemAccessInfo Access, Type *AccessTy,
+    const DenseMap<Value *, const SCEV *> &StridesMap,
+    DenseMap<Value *, unsigned> &DepSetId, Loop *TheLoop,
+    unsigned &RunningDepId, unsigned ASId, bool ShouldCheckWrap,
-bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
-                                     ScalarEvolution *SE, Loop *TheLoop,
-                                     const ValueToValueMap &StridesMap,
-                                     Value *&UncomputablePtr, bool ShouldCheckWrap) {
+bool AccessAnalysis::canCheckPtrAtRT(
+    RuntimePointerChecking &RtCheck, ScalarEvolution *SE, Loop *TheLoop,
+    const DenseMap<Value *, const SCEV *> &StridesMap,
+    Value *&UncomputablePtr, bool ShouldCheckWrap) {
// Find pointers with computable bounds. We are going to use this information
// to place a runtime bound check.
-std::optional<int64_t> llvm::getPtrStride(PredicatedScalarEvolution &PSE,
-                                          Type *AccessTy, Value *Ptr,
-                                          const Loop *Lp,
-                                          const ValueToValueMap &StridesMap,
-                                          bool Assume, bool ShouldCheckWrap) {
+std::optional<int64_t>
+llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr,
+                   const Loop *Lp,
+                   const DenseMap<Value *, const SCEV *> &StridesMap,
+                   bool Assume, bool ShouldCheckWrap) {
Type *Ty = Ptr->getType();
assert(Ty->isPointerTy() && "Unexpected non-ptr");
MemoryDepChecker::Dependence::DepType
-MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
-                              const MemAccessInfo &B, unsigned BIdx,
-                              const ValueToValueMap &Strides) {
+MemoryDepChecker::isDependent(
+    const MemAccessInfo &A, unsigned AIdx, const MemAccessInfo &B,
+    unsigned BIdx, const DenseMap<Value *, const SCEV *> &Strides) {
assert(AIdx < BIdx && "Must pass arguments in program order");
auto [APtr, AIsWrite] = A;
-bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
-                                   MemAccessInfoList &CheckDeps,
-                                   const ValueToValueMap &Strides) {
+bool MemoryDepChecker::areDepsSafe(
+    DepCandidates &AccessSets, MemAccessInfoList &CheckDeps,
+    const DenseMap<Value *, const SCEV *> &Strides) {
MaxSafeDepDistBytes = -1;
SmallPtrSet<MemAccessInfo, 8> Visited;
if (!Ptr)
return;
+ // Note: getStrideFromPointer is a *profitability* heuristic. We
+ // could broaden the scope of values returned here - to anything
+ // which happens to be loop invariant and contributes to the
+ // computation of an interesting IV - but we chose not to as we
+ // don't have a cost model here, and broadening the scope exposes
+ // far too many unprofitable cases.
Value *Stride = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
if (!Stride)
return;
}
LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");
- SymbolicStrides[Ptr] = Stride;
+ // Strip back off the integer cast, and check that our result is a
+ // SCEVUnknown as we expect.
+ Value *StrideVal = stripIntegerCast(Stride);
+ SymbolicStrides[Ptr] = cast<SCEVUnknown>(PSE->getSCEV(StrideVal));
StrideSet.insert(Stride);
}
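To summarize the invariant this change establishes, a hypothetical self-check (not part of the patch):

// SymbolicStrides now maps each versioned pointer to the SCEVUnknown of
// its cast-stripped stride, while StrideSet still holds the stride Value.
for (const auto &Entry : SymbolicStrides)
  assert(isa<SCEVUnknown>(Entry.second) &&
         "only opaque loop-invariant strides are speculated");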