return MVT::i8;
if (Subtarget.hasAVX512()) {
- const unsigned NumElts = VT.getVectorNumElements();
-
// Figure out what this type will be legalized to.
EVT LegalVT = VT;
while (getTypeAction(Context, LegalVT) != TypeLegal)
// If we got a 512-bit vector then we'll definitely have a vXi1 compare.
if (LegalVT.getSimpleVT().is512BitVector())
- return EVT::getVectorVT(Context, MVT::i1, NumElts);
+ return VT.changeVectorElementType(MVT::i1);
if (LegalVT.getSimpleVT().isVector() && Subtarget.hasVLX()) {
// If we legalized to less than a 512-bit vector, then we will use a vXi1
// compare for vXi32/vXi64 for sure. If we have BWI we will also support
// vXi16/vXi8.
MVT EltVT = LegalVT.getSimpleVT().getVectorElementType();
if (Subtarget.hasBWI() || EltVT.getSizeInBits() >= 32)
- return EVT::getVectorVT(Context, MVT::i1, NumElts);
+ return VT.changeVectorElementType(MVT::i1);
}
}
const SDLoc &dl, unsigned vectorWidth) {
EVT VT = Vec.getValueType();
EVT ElVT = VT.getVectorElementType();
- unsigned Factor = VT.getSizeInBits()/vectorWidth;
+ unsigned Factor = VT.getSizeInBits() / vectorWidth;
EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
- VT.getVectorNumElements()/Factor);
+ VT.getVectorNumElements() / Factor);
// Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR
unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
// it to i32 first.
if (EltVT == MVT::i16 || EltVT == MVT::i8) {
Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
- MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
+ MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
return DAG.getBitcast(VT, Item);
SDValue NewBV = DAG.getBitcast(MVT::getVectorVT(WideEltVT, 2),
DAG.getBuildVector(NarrowVT, dl, Ops));
// Broadcast from v2i64/v2f64 and cast to final VT.
- MVT BcastVT = MVT::getVectorVT(WideEltVT, NumElems/2);
+ MVT BcastVT = MVT::getVectorVT(WideEltVT, NumElems / 2);
return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, dl, BcastVT,
NewBV));
}
// For AVX-length vectors, build the individual 128-bit pieces and use
// shuffles to put them in place.
if (VT.getSizeInBits() > 128) {
- MVT HVT = MVT::getVectorVT(EltVT, NumElems/2);
+ MVT HVT = MVT::getVectorVT(EltVT, NumElems / 2);
// Build both the lower and upper subvector.
SDValue Lower =
if ((SelectableLHS && ZeroRHS) || (SelectableRHS && ZeroLHS)) {
EVT SrcVT = SelectableLHS ? LHS.getOperand(0).getValueType()
: RHS.getOperand(0).getValueType();
- unsigned NumSrcElts = SrcVT.getVectorNumElements();
- EVT SrcCondVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumSrcElts);
+ EVT SrcCondVT = SrcVT.changeVectorElementType(MVT::i1);
LHS = insertSubVector(DAG.getUNDEF(SrcVT), LHS, 0, DAG, DL,
VT.getSizeInBits());
RHS = insertSubVector(DAG.getUNDEF(SrcVT), RHS, 0, DAG, DL,
if ((NumElts % 2) != 0)
return SDValue();
- EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts);
+ EVT ReducedVT = VT.changeVectorElementType(MVT::i16);
// Shrink the operands of mul.
SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0);
// Only do this when the result is at least 64 bits, or we'll leave
// dangling PACKSSDW nodes.
if (SVT == MVT::i8 && InSVT == MVT::i32) {
- EVT MidVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
- VT.getVectorNumElements());
+ EVT MidVT = VT.changeVectorElementType(MVT::i16);
SDValue Mid = truncateVectorWithPACK(X86ISD::PACKSS, MidVT, USatVal, DL,
DAG, Subtarget);
assert(Mid && "Failed to pack!");
EVT CastVT = VT;
if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
EltVT = MVT::f64;
- CastVT =
- EVT::getVectorVT(*DAG.getContext(), EltVT, VT.getVectorNumElements());
+ CastVT = VT.changeVectorElementType(EltVT);
}
SDValue Load =
EVT EltVT = VT.getVectorElementType();
if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
EltVT = MVT::f64;
- EVT CastVT =
- EVT::getVectorVT(*DAG.getContext(), EltVT, VT.getVectorNumElements());
+ EVT CastVT = VT.changeVectorElementType(EltVT);
Value = DAG.getBitcast(CastVT, Value);
}
SDValue Extract =
Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask);
// Compare against the bitmask and extend the result.
- EVT CCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
+ EVT CCVT = VT.changeVectorElementType(MVT::i1);
Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ);
Vec = DAG.getSExtOrTrunc(Vec, DL, VT);
if (auto *BV = dyn_cast<BuildVectorSDNode>(Index)) {
if (BV->isConstant() && IndexWidth > 32 &&
DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
- unsigned NumElts = Index.getValueType().getVectorNumElements();
- EVT NewVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
+ EVT NewVT = Index.getValueType().changeVectorElementType(MVT::i32);
Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
}
IndexWidth > 32 &&
Index.getOperand(0).getScalarValueSizeInBits() <= 32 &&
DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
- unsigned NumElts = Index.getValueType().getVectorNumElements();
- EVT NewVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
+ EVT NewVT = Index.getValueType().changeVectorElementType(MVT::i32);
Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
}
// Make sure the index is either i32 or i64
if (IndexWidth != 32 && IndexWidth != 64) {
MVT EltVT = IndexWidth > 32 ? MVT::i64 : MVT::i32;
- EVT IndexVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
- Index.getValueType().getVectorNumElements());
+ EVT IndexVT = Index.getValueType().changeVectorElementType(EltVT);
Index = DAG.getSExtOrTrunc(Index, DL, IndexVT);
return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
}
// UINT_TO_FP(vXi16) -> SINT_TO_FP(ZEXT(vXi16 to vXi32))
if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
SDLoc dl(N);
- EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
- InVT.getVectorNumElements());
+ EVT DstVT = InVT.changeVectorElementType(MVT::i32);
SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
// UINT_TO_FP isn't legal without AVX512 so use SINT_TO_FP.
// SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32))
if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
SDLoc dl(N);
- EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
- InVT.getVectorNumElements());
+ EVT DstVT = InVT.changeVectorElementType(MVT::i32);
SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
if (IsStrict)
return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
if (NumSignBits >= (BitWidth - 31)) {
EVT TruncVT = MVT::i32;
if (InVT.isVector())
- TruncVT = EVT::getVectorVT(*DAG.getContext(), TruncVT,
- InVT.getVectorNumElements());
+ TruncVT = InVT.changeVectorElementType(TruncVT);
SDLoc dl(N);
if (DCI.isBeforeLegalize() || TruncVT != MVT::v2i32) {
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Op0);
ISD::LoadExtType Ext = Opcode == ISD::SIGN_EXTEND_VECTOR_INREG
? ISD::SEXTLOAD
: ISD::ZEXTLOAD;
- EVT MemVT =
- EVT::getVectorVT(*DAG.getContext(), SVT, VT.getVectorNumElements());
+ EVT MemVT = VT.changeVectorElementType(SVT);
if (TLI.isLoadExtLegal(Ext, VT, MemVT)) {
SDValue Load =
DAG.getExtLoad(Ext, SDLoc(N), VT, Ld->getChain(), Ld->getBasePtr(),