/// Queries whether the access through \p Ptr (of type \p AccessTy) is
/// consecutive within the loop. Presumably returns a stride code
/// (e.g. 1 forward, -1 reverse, 0 non-consecutive) — TODO confirm
/// against the LoopAccessInfo implementation; not visible from here.
int isConsecutivePtr(Type *AccessTy, Value *Ptr) const;
/// Returns true if the value V is uniform within the loop.
- bool isUniform(Value *V);
+ bool isUniform(Value *V) const;
/// A uniform memory op is a load or store which accesses the same memory
/// location on all lanes.
- bool isUniformMemOp(Instruction &I) {
- Value *Ptr = getLoadStorePointerOperand(&I);
- if (!Ptr)
- return false;
- // Note: There's nothing inherent which prevents predicated loads and
- // stores from being uniform. The current lowering simply doesn't handle
- // it; in particular, the cost model distinguishes scatter/gather from
- // scalar w/predication, and we currently rely on the scalar path.
- return isUniform(Ptr) && !blockNeedsPredication(I.getParent());
- }
+ bool isUniformMemOp(Instruction &I) const;
/// Returns the information that we collected about runtime memory check.
const RuntimePointerChecking *getRuntimePointerChecking() const {
  // Delegate to the cached LoopAccessInfo, which owns the runtime pointer
  // checks — mirroring how isUniform() forwards to LAI. The previous body
  // was `return 0;`, which unconditionally returned a null pointer and
  // contradicted the documented contract above.
  return LAI->getRuntimePointerChecking();
}
// Out-of-line definitions: the uniformity query forwards to the cached
// LoopAccessInfo (LAI); isUniformMemOp builds on it for loads/stores.
// NOTE(review): the leading +/- markers are diff residue in this file;
// the marked lines are preserved verbatim below.
-bool LoopVectorizationLegality::isUniform(Value *V) {
+bool LoopVectorizationLegality::isUniform(Value *V) const {
return LAI->isUniform(V);
}
+bool LoopVectorizationLegality::isUniformMemOp(Instruction &I) const {
+ Value *Ptr = getLoadStorePointerOperand(&I);
+ if (!Ptr)
+ return false;
+ // Note: There's nothing inherent which prevents predicated loads and
+ // stores from being uniform. The current lowering simply doesn't handle
+ // it; in particular, the cost model distinguishes scatter/gather from
+ // scalar w/predication, and we currently rely on the scalar path.
+ return isUniform(Ptr) && !blockNeedsPredication(I.getParent());
+}
+
bool LoopVectorizationLegality::canVectorizeOuterLoop() {
assert(!TheLoop->isInnermost() && "We are not vectorizing an outer loop.");
// Store the result and return it at the end instead of exiting early, in case