"This is not a truncating conversion!");
assert(isSCEVable(Ty) &&
"This is not a conversion to a SCEVable type!");
+ assert(!Op->getType()->isPointerTy() && "Can't truncate pointer!");
Ty = getEffectiveSCEVType(Ty);
FoldingSetNodeID ID;
"This is not an extending conversion!");
assert(isSCEVable(Ty) &&
"This is not a conversion to a SCEVable type!");
+ assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
Ty = getEffectiveSCEVType(Ty);
// Fold if the operand is constant.
"This is not an extending conversion!");
assert(isSCEVable(Ty) &&
"This is not a conversion to a SCEVable type!");
+ assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
Ty = getEffectiveSCEVType(Ty);
// Fold if the operand is constant.
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
"SCEVAddExpr operand types don't match!");
+ unsigned NumPtrs = count_if(
+ Ops, [](const SCEV *Op) { return Op->getType()->isPointerTy(); });
+ assert(NumPtrs <= 1 && "add has at most one pointer operand");
#endif
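
The count_if check above encodes the invariant that a SCEV add models "base plus integer offsets": at most one operand may be pointer-typed, and the result is pointer-typed exactly when one is present. A minimal standalone sketch of the same check, using a hypothetical Operand type rather than the real SCEV class:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Hypothetical stand-in for a SCEV operand; only pointer-ness matters.
    struct Operand {
      bool IsPointer;
      bool isPointerTy() const { return IsPointer; }
    };

    // An add is "base + offsets": at most one pointer operand is allowed.
    void checkAddOperands(const std::vector<Operand> &Ops) {
      auto NumPtrs = std::count_if(
          Ops.begin(), Ops.end(),
          [](const Operand &Op) { return Op.isPointerTy(); });
      assert(NumPtrs <= 1 && "add has at most one pointer operand");
      (void)NumPtrs; // Avoid an unused-variable warning with NDEBUG.
    }
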
// Sort by complexity, this groups all similar expression types together.
Ops.clear();
if (AccumulatedConstant != 0)
Ops.push_back(getConstant(AccumulatedConstant));
- for (auto &MulOp : MulOpLists)
- if (MulOp.first != 0)
+ for (auto &MulOp : MulOpLists) {
+ if (MulOp.first == 1) {
+ Ops.push_back(getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1));
+ } else if (MulOp.first != 0) {
Ops.push_back(getMulExpr(
getConstant(MulOp.first),
getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
SCEV::FlagAnyWrap, Depth + 1));
+ }
+ }
if (Ops.empty())
return getZero(Ty);
if (Ops.size() == 1)
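
The new MulOp.first == 1 branch exists because getMulExpr (below) now rejects pointer operands: a collected group whose coefficient folded to 1 must be re-emitted as a plain sum rather than as 1 * (sum). A standalone sketch of that coefficient folding, with plain integers standing in for SCEVs:

    #include <cstdint>
    #include <numeric>
    #include <vector>

    // Fold a (coefficient, operand-list) group the way the patched loop
    // does: 0 drops the group, 1 emits the bare sum (no multiply node),
    // anything else emits coefficient * sum.
    int64_t foldGroup(int64_t Coeff, const std::vector<int64_t> &Terms) {
      int64_t Sum = std::accumulate(Terms.begin(), Terms.end(), int64_t{0});
      if (Coeff == 0)
        return 0;   // Group vanishes.
      if (Coeff == 1)
        return Sum; // The only legal form when Sum is pointer-typed.
      return Coeff * Sum;
    }
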
assert(!Ops.empty() && "Cannot get empty mul!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
- Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
+ Type *ETy = Ops[0]->getType();
+ assert(!ETy->isPointerTy() && "SCEVMulExpr operand can't be pointer!");
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
- assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
+ assert(Ops[i]->getType() == ETy &&
"SCEVMulExpr operand types don't match!");
#endif
/// possible.
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
const SCEV *RHS) {
- assert(getEffectiveSCEVType(LHS->getType()) ==
- getEffectiveSCEVType(RHS->getType()) &&
+ assert(!LHS->getType()->isPointerTy() &&
+ "SCEVUDivExpr operand can't be pointer!");
+ assert(LHS->getType() == RHS->getType() &&
"SCEVUDivExpr operand types don't match!");
FoldingSetNodeID ID;
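
Division has no meaning on pointers, so getUDivExpr no longer performs the implicit pointer-to-integer widening that the old getEffectiveSCEVType comparison allowed; callers holding a pointer expression must convert it explicitly. A sketch of the caller-side pattern, assuming SE, PtrSCEV, Denom, and an integer type IntTy are in scope:

    // Before this patch, getUDivExpr would silently treat a pointer as an
    // integer of pointer width; now the conversion must be spelled out.
    const SCEV *AsInt = SE.getPtrToIntExpr(PtrSCEV, IntTy);
    const SCEV *Quot = SE.getUDivExpr(AsInt, Denom);
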
if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
- for (unsigned i = 1, e = Operands.size(); i != e; ++i)
+ for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
"SCEVAddRecExpr operand types don't match!");
+ assert(!Operands[i]->getType()->isPointerTy() && "Step must be integer");
+ }
for (unsigned i = 0, e = Operands.size(); i != e; ++i)
assert(isLoopInvariant(Operands[i], L) &&
"SCEVAddRecExpr operand is not loop-invariant!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
- for (unsigned i = 1, e = Ops.size(); i != e; ++i)
+ for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
"Operand types don't match!");
+ assert(Ops[0]->getType()->isPointerTy() ==
+ Ops[i]->getType()->isPointerTy() &&
+ "min/max should be consistently pointerish");
+ }
#endif
bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr;
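
Min/max is the one n-ary expression that stays legal on pointers, provided the operands don't mix pointers with integers; a umax over two pointer loop bounds is still fine. A sketch against the API, assuming SE, pointer-typed PtrA and PtrB, and an integer type IntTy are in scope:

    // Allowed: every operand is pointer-typed.
    const SCEV *MaxEnd = SE.getUMaxExpr(PtrA, PtrB);
    // Would trip the new assert: pointer and integer operands mixed.
    // const SCEV *Bad = SE.getUMaxExpr(PtrA, SE.getConstant(IntTy, 1));
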
}
}
+ if (LHS->getType()->isPointerTy())
+ return false;
if (CmpInst::isSigned(Pred)) {
LHS = getSignExtendExpr(LHS, FoundLHS->getType());
RHS = getSignExtendExpr(RHS, FoundLHS->getType());
}
} else if (getTypeSizeInBits(LHS->getType()) >
getTypeSizeInBits(FoundLHS->getType())) {
+ if (FoundLHS->getType()->isPointerTy())
+ return false;
if (CmpInst::isSigned(FoundPred)) {
FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
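
Both early-outs above follow the same shape: proving one comparison from another across different bit widths requires extending the narrower pair first, and a pointer cannot be sign- or zero-extended, so the query now conservatively reports "not proven" instead of asserting inside the extend. A standalone restatement of the guard, with a hypothetical MockType in place of llvm::Type:

    struct MockType {
      bool IsPointer;
      unsigned Bits;
    };

    // Widening the narrower side is a precondition for comparing the two
    // conditions; pointers have no sext/zext, so give up instead.
    bool canWidenNarrowerSide(const MockType &Narrow, const MockType &Wide) {
      if (Narrow.IsPointer)
        return false;
      return Narrow.Bits <= Wide.Bits;
    }
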
const APInt &RA = RC->getAPInt();
// Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do
// some folding.
- if (RA.isAllOnesValue())
+ if (RA.isAllOnesValue()) {
+ if (LHS->getType()->isPointerTy())
+ return nullptr;
return SE.getMulExpr(LHS, RC);
+ }
// Handle x /s 1 as x.
if (RA == 1)
return LHS;
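
The added bail-out matters because the x /s -1 fold rewrites a divide as a multiply, and a multiply may no longer carry a pointer operand. The integer identity itself is unchanged, as in this standalone sketch:

    #include <cassert>
    #include <cstdint>

    // Two's-complement identity behind the fold: x /s -1 == x * -1 (both
    // overflow for INT64_MIN, hence the guard). No analogous identity
    // exists for pointers.
    int64_t sdivByMinusOne(int64_t X) {
      assert(X != INT64_MIN && "both the divide and the negation overflow");
      return X * -1; // Same value as X / -1.
    }
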
// Determine the integer type for the base formula.
Type *DstTy = Base.getType();
if (!DstTy) return;
- DstTy = SE.getEffectiveSCEVType(DstTy);
+ if (DstTy->isPointerTy())
+ return;
for (Type *SrcTy : Types) {
if (SrcTy != DstTy && TTI.isTruncateFree(SrcTy, DstTy)) {
if (F.BaseGV) {
// Flush the operand list to suppress SCEVExpander hoisting.
if (!Ops.empty()) {
- Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty);
+ Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), IntTy);
Ops.clear();
Ops.push_back(SE.getUnknown(FullV));
}
const SCEVAddRecExpr *Phi,
const SCEVAddRecExpr *Requested,
bool &InvertStep) {
+ // We can't transform to match a pointer PHI.
+ if (Phi->getType()->isPointerTy())
+ return false;
+
Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());
}
// Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
- if (SE.getAddExpr(Requested->getStart(),
- SE.getNegativeSCEV(Requested)) == Phi) {
+ if (SE.getMinusSCEV(Requested->getStart(), Requested) == Phi) {
InvertStep = true;
return true;
}
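
The getMinusSCEV form computes the same value as the removed getAddExpr/getNegativeSCEV pair, but avoids forming getNegativeSCEV of a possibly pointer-typed recurrence, which would now assert (negation is a multiply by -1). The identity being matched is {R,+,-1}(i) = R - i = R - {0,+,1}(i), checkable in a standalone sketch:

    #include <cassert>
    #include <cstdint>

    int64_t downCounting(int64_t R, int64_t I) { return R + (-1) * I; }
    int64_t invertedUpCount(int64_t R, int64_t I) { return R - (0 + 1 * I); }

    int main() {
      // Both recurrences agree at every iteration, so a down-counting
      // AddRec can be matched against an up-counting PHI by inversion.
      for (int64_t I = 0; I < 8; ++I)
        assert(downCounting(100, I) == invertedUpCount(100, I));
    }
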
// Rewrite an AddRec in terms of the canonical induction variable, if
// its type is more narrow.
if (CanonicalIV &&
- SE.getTypeSizeInBits(CanonicalIV->getType()) >
- SE.getTypeSizeInBits(Ty)) {
+ SE.getTypeSizeInBits(CanonicalIV->getType()) > SE.getTypeSizeInBits(Ty) &&
+ !S->getType()->isPointerTy()) {
SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
const SCEV *S1 = SE.getSCEV(V1);
const SCEV *S2 = SE.getSCEV(V2);
- const SCEV *P0 = SE.getAddExpr(S0, S0);
- const SCEV *P1 = SE.getAddExpr(S1, S1);
- const SCEV *P2 = SE.getAddExpr(S2, S2);
+ const SCEV *P0 = SE.getAddExpr(S0, SE.getConstant(S0->getType(), 2));
+ const SCEV *P1 = SE.getAddExpr(S1, SE.getConstant(S1->getType(), 2));
+ const SCEV *P2 = SE.getAddExpr(S2, SE.getConstant(S2->getType(), 2));
- const SCEVMulExpr *M0 = cast<SCEVMulExpr>(P0);
- const SCEVMulExpr *M1 = cast<SCEVMulExpr>(P1);
- const SCEVMulExpr *M2 = cast<SCEVMulExpr>(P2);
+ auto *M0 = cast<SCEVAddExpr>(P0);
+ auto *M1 = cast<SCEVAddExpr>(P1);
+ auto *M2 = cast<SCEVAddExpr>(P2);
EXPECT_EQ(cast<SCEVConstant>(M0->getOperand(0))->getValue()->getZExtValue(),
2u);
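
The test reshaping follows from the new rules: the unknowns here are pointer-typed, so S0 + S0 can no longer canonicalize to the mul 2 * S0 (a mul may not carry a pointer operand). Adding the integer constant 2 instead yields an add, and SCEV's complexity ordering places the constant at operand 0, which is what the EXPECT_EQ above relies on. A hedged companion check under the same fixture (the real test may phrase it differently):

    // SCEVs are uniqued, so pointer identity is the right comparison;
    // operand 1 should be the unknown itself.
    EXPECT_EQ(M0->getOperand(1), S0);
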
ReturnInst::Create(Context, nullptr, EndBB);
ScalarEvolution SE = buildSE(*F);
const SCEV *S = SE.getSCEV(Accum);
+ S = SE.getLosslessPtrToIntExpr(S);
Type *I128Ty = Type::getInt128Ty(Context);
SE.getZeroExtendExpr(S, I128Ty);
}
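
getZeroExtendExpr now asserts on pointer-typed input, so the test first rewrites the pointer expression through getLosslessPtrToIntExpr, which returns the equivalent integer expression when the conversion preserves all information (and a SCEVCouldNotCompute otherwise, which real callers should check for). The general widening pattern, assuming SE, S, and Context as in the test above:

    // Convert pointer SCEVs to integers explicitly before any extension.
    if (S->getType()->isPointerTy())
      S = SE.getLosslessPtrToIntExpr(S);
    const SCEV *Wide = SE.getZeroExtendExpr(S, Type::getInt128Ty(Context));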