The m_VScale() matcher is unusual in that it requires a DataLayout.
The DataLayout is currently used only to determine the allocation size
of the GEP source type. However,
I believe it is sufficient to check for the canonical
<vscale x 1 x i8> form here -- I don't think there's a need to
recognize exotic variations like <vscale x 1 x i4> as a vscale
constant representation as well.
Differential Revision: https://reviews.llvm.org/D144566
/// `ptrtoint(gep <vscale x 1 x i8>, <vscale x 1 x i8>* null, i32 1)`
/// under the right conditions determined by DataLayout.
struct VScaleVal_match {
- const DataLayout &DL;
- VScaleVal_match(const DataLayout &DL) : DL(DL) {}
-
template <typename ITy> bool match(ITy *V) {
if (m_Intrinsic<Intrinsic::vscale>().match(V))
return true;
Value *Ptr;
if (m_PtrToInt(m_Value(Ptr)).match(V)) {
if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
- auto *DerefTy = GEP->getSourceElementType();
- if (GEP->getNumIndices() == 1 && isa<ScalableVectorType>(DerefTy) &&
+ auto *DerefTy =
+ dyn_cast<ScalableVectorType>(GEP->getSourceElementType());
+ if (GEP->getNumIndices() == 1 && DerefTy &&
+ DerefTy->getElementType()->isIntegerTy(8) &&
m_Zero().match(GEP->getPointerOperand()) &&
- m_SpecificInt(1).match(GEP->idx_begin()->get()) &&
- DL.getTypeAllocSizeInBits(DerefTy).getKnownMinValue() == 8)
+ m_SpecificInt(1).match(GEP->idx_begin()->get()))
return true;
}
}
}
};
-inline VScaleVal_match m_VScale(const DataLayout &DL) {
- return VScaleVal_match(DL);
+inline VScaleVal_match m_VScale() {
+ return VScaleVal_match();
}
template <typename LHS, typename RHS, unsigned Opcode, bool Commutable = false>
TLI.getPointerTy(DAG.getDataLayout(), AS));
}
- if (match(C, m_VScale(DAG.getDataLayout())))
+ if (match(C, m_VScale()))
return DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1));
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
visitTargetIntrinsic(I, Intrinsic);
return;
case Intrinsic::vscale: {
- match(&I, m_VScale(DAG.getDataLayout()));
EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
setValue(&I, DAG.getVScale(sdl, VT, APInt(VT.getSizeInBits(), 1)));
return;
// Check whether "W == vscale * EC.getKnownMinValue()"
if (EC.isScalable()) {
- // Undig the DL
- const auto *ParMod = this->getModule();
- if (!ParMod)
- return false;
- const auto &DL = ParMod->getDataLayout();
-
// Compare vscale patterns
uint64_t VScaleFactor;
- if (match(VLParam, m_c_Mul(m_ConstantInt(VScaleFactor), m_VScale(DL))))
+ if (match(VLParam, m_c_Mul(m_ConstantInt(VScaleFactor), m_VScale())))
return VScaleFactor >= EC.getKnownMinValue();
- return (EC.getKnownMinValue() == 1) && match(VLParam, m_VScale(DL));
+ return (EC.getKnownMinValue() == 1) && match(VLParam, m_VScale());
}
// standard SIMD operation
}
}
- if (match(Src, m_VScale(DL))) {
+ if (match(Src, m_VScale())) {
if (Trunc.getFunction() &&
Trunc.getFunction()->hasFnAttribute(Attribute::VScaleRange)) {
Attribute Attr =
return BinaryOperator::CreateAnd(X, ZextC);
}
- if (match(Src, m_VScale(DL))) {
+ if (match(Src, m_VScale())) {
if (Zext.getFunction() &&
Zext.getFunction()->hasFnAttribute(Attribute::VScaleRange)) {
Attribute Attr =
}
}
- if (match(Src, m_VScale(DL))) {
+ if (match(Src, m_VScale())) {
if (Sext.getFunction() &&
Sext.getFunction()->hasFnAttribute(Attribute::VScaleRange)) {
Attribute Attr =
Value *NullPtrVec = Constant::getNullValue(VecPtrTy);
Value *GEP = IRB.CreateGEP(VecTy, NullPtrVec, IRB.getInt64(1));
Value *PtrToInt = IRB.CreatePtrToInt(GEP, DL.getIntPtrType(GEP->getType()));
- EXPECT_TRUE(match(PtrToInt, m_VScale(DL)));
+ EXPECT_TRUE(match(PtrToInt, m_VScale()));
// This used to cause assertion failures when attempting to match m_VScale.
// With opaque pointers the bitcast is no longer present.
Value *GEP2 = IRB.CreateGEP(VecTy, BitCast, IRB.getInt64(1));
Value *PtrToInt2 =
IRB.CreatePtrToInt(GEP2, DL.getIntPtrType(GEP2->getType()));
- EXPECT_TRUE(match(PtrToInt2, m_VScale(DL)));
+ EXPECT_TRUE(match(PtrToInt2, m_VScale()));
}
TEST_F(PatternMatchTest, NotForbidUndef) {