if (CostKind != TTI::TCK_RecipThroughput)
return Cost;
- if (Src->isVectorTy() &&
- Src->getPrimitiveSizeInBits() < LT.second.getSizeInBits()) {
+ if (Src->isVectorTy() && EVT::getEVT(Src).bitsLT(LT.second)) {
// This is a vector load that legalizes to a larger type than the vector
// itself. Unless the corresponding extending load or truncating store is
// legal, then this will scalarize.
if (!LDST->isSimple())
return false;
+ EVT LdStMemVT = LDST->getMemoryVT();
+
+ // Bail out when changing the scalable property, since we can't be sure that
+ // we're actually narrowing here.
+ if (LdStMemVT.isScalableVector() != MemVT.isScalableVector())
+ return false;
+
// Verify that we are actually reducing a load width here.
- if (LDST->getMemoryVT().getSizeInBits() < MemVT.getSizeInBits())
+ if (LdStMemVT.bitsLT(MemVT))
return false;
// Ensure that this isn't going to produce an unsupported memory access
// after truncation.
if (N0.hasOneUse() && ISD::isUNINDEXEDLoad(N0.getNode())) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- if (LN0->isSimple() &&
- LN0->getMemoryVT().getStoreSizeInBits() < VT.getSizeInBits()) {
+ if (LN0->isSimple() && LN0->getMemoryVT().bitsLT(VT)) {
SDValue NewLoad = DAG.getExtLoad(LN0->getExtensionType(), SDLoc(LN0),
VT, LN0->getChain(), LN0->getBasePtr(),
LN0->getMemoryVT(),
// more effectively move in the right direction and prevent falling down
// to scalarization in many cases due to the input vector being split too
// far.
- if ((SrcVT.getVectorMinNumElements() & 1) == 0 &&
- SrcVT.getSizeInBits() * 2 < DestVT.getSizeInBits()) {
+ if (SrcVT.getVectorElementCount().isKnownEven() &&
+ SrcVT.getScalarSizeInBits() * 2 < DestVT.getScalarSizeInBits()) {
LLVMContext &Ctx = *DAG.getContext();
EVT NewSrcVT = SrcVT.widenIntegerVectorElementType(Ctx);
EVT SplitSrcVT = SrcVT.getHalfNumVectorElementsVT(Ctx);
LdChain.push_back(LdOp.getValue(1));
// Check if we can load the element with one instruction.
- if (LdWidth <= NewVTWidth) {
+ if (TypeSize::isKnownLE(LdWidth, NewVTWidth)) {
if (!NewVT.isVector()) {
unsigned NumElts = WidenWidth.getFixedSize() / NewVTWidth.getFixedSize();
EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewVT, NumElts);
IncrementPointer(cast<LoadSDNode>(LdOp), NewVT, MPI, BasePtr,
&ScaledOffset);
- if (LdWidth < NewVTWidth) {
+ if (TypeSize::isKnownLT(LdWidth, NewVTWidth)) {
// The current type we are using is too large. Find a better size.
NewVT = FindMemType(DAG, TLI, LdWidth.getKnownMinSize(), WidenVT, LdAlign,
WidthDiff.getKnownMinSize());
LdOps.push_back(L);
LdOp = L;
- } while (LdWidth > NewVTWidth);
+ } while (TypeSize::isKnownGT(LdWidth, NewVTWidth));
// Build the vector from the load operations.
unsigned End = LdOps.size();
IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr,
&ScaledOffset);
- } while (StWidth.isNonZero() && StWidth >= NewVTWidth);
+ } while (StWidth.isNonZero() && TypeSize::isKnownGE(StWidth, NewVTWidth));
} else {
// Cast the vector to the scalar type we can store.
unsigned NumElts = ValWidth.getFixedSize() / NewVTWidth.getFixedSize();
StWidth -= NewVTWidth;
IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr);
- } while (StWidth.isNonZero() && StWidth >= NewVTWidth);
+ } while (StWidth.isNonZero() && TypeSize::isKnownGE(StWidth, NewVTWidth));
// Restore index back to be relative to the original widen element type.
Idx = Idx * NewVTWidth.getFixedSize() / ValEltWidth;
}
// are the same size, this is an obvious bitcast.
if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
- } else if (ValueVT.getSizeInBits() < PartEVT.getSizeInBits()) {
+ } else if (ValueVT.bitsLT(PartEVT)) {
// Bitcast Val back the original type and extract the corresponding
// vector we want.
unsigned Elts = PartEVT.getSizeInBits() / ValueVT.getScalarSizeInBits();
// Promoted vector extract
Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
} else {
- if (ValueVT.getVectorNumElements() == 1) {
+ if (ValueVT.getVectorElementCount().isScalar()) {
Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
DAG.getVectorIdxConstant(0, DL));
} else {
- assert(PartVT.getSizeInBits() > ValueVT.getSizeInBits() &&
+ uint64_t ValueSize = ValueVT.getFixedSizeInBits();
+ assert(PartVT.getFixedSizeInBits() > ValueSize &&
"lossy conversion of vector to scalar type");
- EVT IntermediateType =
- EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
+ EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
Val = DAG.getBitcast(IntermediateType, Val);
Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
}
// Truncate the result if SetCC returns a larger type than needed.
EVT RType = Node->getValueType(1);
- if (RType.getSizeInBits() < Overflow.getValueSizeInBits())
+ if (RType.bitsLT(Overflow.getValueType()))
Overflow = DAG.getNode(ISD::TRUNCATE, dl, RType, Overflow);
assert(RType.getSizeInBits() == Overflow.getValueSizeInBits() &&