// These should be ordered in terms of increasing complexity to make the
// folders simpler.
scConstant,
- scVScale,
scTruncate,
scZeroExtend,
scSignExtend,
static bool classof(const SCEV *S) { return S->getSCEVType() == scConstant; }
};
-/// This class represents the value of vscale, as used when defining the length
-/// of a scalable vector or returned by the llvm.vscale() intrinsic.
-class SCEVVScale : public SCEV {
- friend class ScalarEvolution;
-
- SCEVVScale(const FoldingSetNodeIDRef ID, Type *ty)
- : SCEV(ID, scVScale, 0), Ty(ty) {}
-
- Type *Ty;
-
-public:
- Type *getType() const { return Ty; }
-
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const SCEV *S) { return S->getSCEVType() == scVScale; }
-};
-
inline unsigned short computeExpressionSize(ArrayRef<const SCEV *> Args) {
APInt Size(16, 1);
for (const auto *Arg : Args)
public:
Value *getValue() const { return getValPtr(); }
+  /// Check whether the wrapped value is "vscale" (the runtime multiple of a
+  /// scalable vector's known minimum size), i.e. it matches the llvm.vscale
+  /// intrinsic or its equivalent ptrtoint(gep) constant expression.
+ bool isVScale() const;
+
Type *getType() const { return getValPtr()->getType(); }
/// Methods for support type inquiry through isa, cast, and dyn_cast:
switch (S->getSCEVType()) {
case scConstant:
return ((SC *)this)->visitConstant((const SCEVConstant *)S);
- case scVScale:
- return ((SC *)this)->visitVScale((const SCEVVScale *)S);
case scPtrToInt:
return ((SC *)this)->visitPtrToIntExpr((const SCEVPtrToIntExpr *)S);
case scTruncate:
switch (S->getSCEVType()) {
case scConstant:
- case scVScale:
case scUnknown:
continue;
case scPtrToInt:
const SCEV *visitConstant(const SCEVConstant *Constant) { return Constant; }
- const SCEV *visitVScale(const SCEVVScale *VScale) { return VScale; }
-
const SCEV *visitPtrToIntExpr(const SCEVPtrToIntExpr *Expr) {
const SCEV *Operand = ((SC *)this)->visit(Expr->getOperand());
return Operand == Expr->getOperand()
case scConstant:
cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
return;
- case scVScale:
- OS << "vscale";
- return;
case scPtrToInt: {
const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(this);
const SCEV *Op = PtrToInt->getOperand();
OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
return;
}
- case scUnknown:
- cast<SCEVUnknown>(this)->getValue()->printAsOperand(OS, false);
+ case scUnknown: {
+ const SCEVUnknown *U = cast<SCEVUnknown>(this);
+ if (U->isVScale()) {
+ OS << "vscale";
+ return;
+ }
+
+ // Otherwise just print it normally.
+ U->getValue()->printAsOperand(OS, false);
return;
+ }
case scCouldNotCompute:
OS << "***COULDNOTCOMPUTE***";
return;
switch (getSCEVType()) {
case scConstant:
return cast<SCEVConstant>(this)->getType();
- case scVScale:
- return cast<SCEVVScale>(this)->getType();
case scPtrToInt:
case scTruncate:
case scZeroExtend:
ArrayRef<const SCEV *> SCEV::operands() const {
switch (getSCEVType()) {
case scConstant:
- case scVScale:
case scUnknown:
return {};
case scPtrToInt:
return getConstant(ConstantInt::get(ITy, V, isSigned));
}
-const SCEV *ScalarEvolution::getVScale(Type *Ty) {
- FoldingSetNodeID ID;
- ID.AddInteger(scVScale);
- ID.AddPointer(Ty);
- void *IP = nullptr;
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
- return S;
- SCEV *S = new (SCEVAllocator) SCEVVScale(ID.Intern(SCEVAllocator), Ty);
- UniqueSCEVs.InsertNode(S, IP);
- return S;
-}
-
SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
const SCEV *op, Type *ty)
: SCEV(ID, SCEVTy, computeExpressionSize(op)), Op(op), Ty(ty) {}
setValPtr(New);
}
+bool SCEVUnknown::isVScale() const {
+ return match(getValue(), m_VScale());
+}
+
//===----------------------------------------------------------------------===//
// SCEV Utilities
//===----------------------------------------------------------------------===//
return LA.ult(RA) ? -1 : 1;
}
- case scVScale: {
- const auto *LTy = cast<IntegerType>(cast<SCEVVScale>(LHS)->getType());
- const auto *RTy = cast<IntegerType>(cast<SCEVVScale>(RHS)->getType());
- return LTy->getBitWidth() - RTy->getBitWidth();
- }
-
case scAddRecExpr: {
const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
RetVal visitConstant(const SCEVConstant *Constant) { return Constant; }
- RetVal visitVScale(const SCEVVScale *VScale) { return VScale; }
-
RetVal visitPtrToIntExpr(const SCEVPtrToIntExpr *Expr) { return Expr; }
RetVal visitTruncateExpr(const SCEVTruncateExpr *Expr) { return Expr; }
static bool scevUnconditionallyPropagatesPoisonFromOperands(SCEVTypes Kind) {
switch (Kind) {
case scConstant:
- case scVScale:
case scTruncate:
case scZeroExtend:
case scSignExtend:
if (!scevUnconditionallyPropagatesPoisonFromOperands(S->getSCEVType())) {
switch (S->getSCEVType()) {
case scConstant:
- case scVScale:
case scTruncate:
case scZeroExtend:
case scSignExtend:
const SCEV *
ScalarEvolution::getSizeOfExpr(Type *IntTy, TypeSize Size) {
const SCEV *Res = getConstant(IntTy, Size.getKnownMinValue());
- if (Size.isScalable())
- Res = getMulExpr(Res, getVScale(IntTy));
+ if (Size.isScalable()) {
+    // TODO: ConstantExpr has no getVScale(); until one exists, synthesize
+    // vscale as ptrtoint(gep <vscale x 1 x i8>, null, i64 1) — the canonical
+    // constant-expression spelling that m_VScale() recognizes.
+ Type *SrcElemTy = ScalableVectorType::get(Type::getInt8Ty(getContext()), 1);
+ Constant *NullPtr = Constant::getNullValue(SrcElemTy->getPointerTo());
+ Constant *One = ConstantInt::get(IntTy, 1);
+ Constant *GEP = ConstantExpr::getGetElementPtr(SrcElemTy, NullPtr, One);
+ Constant *VScale = ConstantExpr::getPtrToInt(GEP, IntTy);
+ Res = getMulExpr(Res, getUnknown(VScale));
+ }
return Res;
}
bool follow(const SCEV *S) {
switch (S->getSCEVType()) {
case scConstant:
- case scVScale:
case scPtrToInt:
case scTruncate:
case scZeroExtend:
switch (S->getSCEVType()) {
case scConstant:
return cast<SCEVConstant>(S)->getAPInt().countr_zero();
- case scVScale:
- return 0;
case scTruncate: {
const SCEVTruncateExpr *T = cast<SCEVTruncateExpr>(S);
return std::min(GetMinTrailingZeros(T->getOperand()),
break;
[[fallthrough]];
case scConstant:
- case scVScale:
case scTruncate:
case scZeroExtend:
case scSignExtend:
switch (S->getSCEVType()) {
case scConstant:
llvm_unreachable("Already handled above.");
- case scVScale:
- return setRange(S, SignHint, std::move(ConservativeResult));
case scTruncate: {
const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(S);
ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint, Depth + 1);
switch (V->getSCEVType()) {
case scCouldNotCompute:
case scAddRecExpr:
- case scVScale:
return nullptr;
case scConstant:
return cast<SCEVConstant>(V)->getValue();
const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
switch (V->getSCEVType()) {
case scConstant:
- case scVScale:
return V;
case scAddRecExpr: {
// If this is a loop recurrence for a loop that does not contain L, then we
case scSequentialUMinExpr:
return getSequentialMinMaxExpr(V->getSCEVType(), NewOps);
case scConstant:
- case scVScale:
case scAddRecExpr:
case scUnknown:
case scCouldNotCompute:
ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
switch (S->getSCEVType()) {
case scConstant:
- case scVScale:
return LoopInvariant;
case scAddRecExpr: {
const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
switch (S->getSCEVType()) {
case scConstant:
- case scVScale:
return ProperlyDominatesBlock;
case scAddRecExpr: {
// This uses a "dominates" query instead of "properly dominates" query