Into = ExprResult.Val.getInt();
if (Into.isNegative() || !Into.isIntN(BitsInSizeT))
return false;
- Into = Into.zextOrSelf(BitsInSizeT);
+ Into = Into.zext(BitsInSizeT);
return true;
};
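// A minimal sketch of the APInt semantics these replacements rely on
// (assuming current llvm/ADT/APInt.h): zext/sext/trunc now accept a target
// width equal to the current width, so the *OrSelf variants are redundant
// wherever the target is known to be at least (for zext/sext) or at most
// (for trunc) the current width:
//   llvm::APInt A(32, 42);
//   llvm::APInt Same = A.zext(32); // now a no-op rather than an assert
//   llvm::APInt Wide = A.zext(64); // zero-extends as before
//   // A.zext(16) still asserts; narrowing requires trunc().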
unsigned Bits =
std::max(CAT->getSize().getBitWidth(), ArrayBound.getBitWidth());
- llvm::APInt InitBound = CAT->getSize().zextOrSelf(Bits);
- llvm::APInt AllocBound = ArrayBound.zextOrSelf(Bits);
+ llvm::APInt InitBound = CAT->getSize().zext(Bits);
+ llvm::APInt AllocBound = ArrayBound.zext(Bits);
if (InitBound.ugt(AllocBound)) {
if (IsNothrow)
return ZeroInitialization(E);
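// Worked example of the compare-at-common-width idiom above, with
// illustrative values: an i16 initializer bound of 300 and an i8 allocation
// bound of 200 are both brought to 16 bits, where 300.ugt(200) flags the
// overrun; neither zext() ever narrows, so the calls cannot assert.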
for (unsigned i = 0; i < NElts; i++) {
llvm::APInt Elt;
if (BigEndian)
- Elt = SValInt.rotl(i*EltSize+FloatEltSize).truncOrSelf(FloatEltSize);
+ Elt = SValInt.rotl(i * EltSize + FloatEltSize).trunc(FloatEltSize);
else
- Elt = SValInt.rotr(i*EltSize).truncOrSelf(FloatEltSize);
+ Elt = SValInt.rotr(i * EltSize).trunc(FloatEltSize);
Elts.push_back(APValue(APFloat(Sem, Elt)));
}
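// For illustration, the little-endian branch with a 64-bit SValInt of
// 0xDDDDCCCCBBBBAAAA and 16-bit elements (FloatEltSize == EltSize == 16
// here): element 2 is rotr(2 * 16).trunc(16) == 0xCCCC, i.e. the rotation
// brings each element down to the low bits before the now always-narrowing
// trunc.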
} else if (EltTy->isIntegerType()) {
// to convert every integer to signed 64 bit before mangling (including
// unsigned 64 bit values). Do the same, but preserve bits beyond the bottom
// 64.
- llvm::APInt Value =
- Number.isSigned() ? Number.sextOrSelf(64) : Number.zextOrSelf(64);
+ unsigned Width = std::max(Number.getBitWidth(), 64U);
+ llvm::APInt Value = Number.extend(Width);
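// For example, an i32 Number widens to 64 bits (APSInt::extend sign- or
// zero-extends according to Number's signedness), while an i128 Number
// keeps all 128 bits, preserving the bits beyond the bottom 64 as the
// comment above requires.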
// <non-negative integer> ::= A@ # when Number == 0
// ::= <decimal digit> # when 1 <= Number <= 10
// Signed overflow occurs if the result is greater than INT_MAX or less
// than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
auto IntMax =
- llvm::APInt::getSignedMaxValue(ResultInfo.Width).zextOrSelf(OpWidth);
+ llvm::APInt::getSignedMaxValue(ResultInfo.Width).zext(OpWidth);
llvm::Value *MaxResult =
CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
CGF.Builder.CreateZExt(IsNegative, OpTy));
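// Worked example for a 32-bit result: INT_MAX is 2147483647, so MaxResult
// is 2147483647 for a non-negative result and 2147483648 for a negative
// one, matching the asymmetric two's complement range [-2^31, 2^31 - 1].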
const auto &EVal = E->getInitVal();
// Only single-bit enumerators introduce new flag values.
if (EVal.isPowerOf2())
- FlagBits = FlagBits.zextOrSelf(EVal.getBitWidth()) | EVal;
+ FlagBits = FlagBits.zext(EVal.getBitWidth()) | EVal;
}
}
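// E.g. enumerators 1, 2 and 4 each contribute a new flag bit, while an
// enumerator of 3 (== 1 | 2) is a combination and is skipped by the
// isPowerOf2() test above.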
Matches[0].getNodeAs<IntegerLiteral>("initNum")->getValue();
auto CondOp = Matches[0].getNodeAs<BinaryOperator>("conditionOperator");
if (InitNum.getBitWidth() != BoundNum.getBitWidth()) {
- InitNum = InitNum.zextOrSelf(BoundNum.getBitWidth());
- BoundNum = BoundNum.zextOrSelf(InitNum.getBitWidth());
+ unsigned Width = std::max(InitNum.getBitWidth(), BoundNum.getBitWidth());
+ InitNum = InitNum.zext(Width);
+ BoundNum = BoundNum.zext(Width);
}
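// Extending each operand to the other's width would assert whenever the
// target is the narrower of the two; extending both to the common maximum
// is always either a widening or a same-width no-op.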
if (CondOp->getOpcode() == BO_GE || CondOp->getOpcode() == BO_LE)
unsigned TypeSize =
DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize();
LE = LE.mul(APInt(IndexSize, TypeSize), GEPOp->isInBounds());
- Decomposed.Offset += LE.Offset.sextOrSelf(MaxIndexSize);
- APInt Scale = LE.Scale.sextOrSelf(MaxIndexSize);
+ Decomposed.Offset += LE.Offset.sext(MaxIndexSize);
+ APInt Scale = LE.Scale.sext(MaxIndexSize);
// If we already had an occurrence of this index variable, merge this
// scale into it. For example, we want to handle:
return ConstantExpr::getBitCast(C, DestTy);
Result <<= BitShift;
- Result |= ElementCI->getValue().zextOrSelf(Result.getBitWidth());
+ Result |= ElementCI->getValue().zext(Result.getBitWidth());
}
return nullptr;
unsigned Width = C0->getBitWidth();
assert(Scale < Width && "Illegal scale.");
unsigned ExtendedWidth = Width * 2;
- APInt Product = (C0->sextOrSelf(ExtendedWidth) *
- C1->sextOrSelf(ExtendedWidth)).ashr(Scale);
+ APInt Product =
+ (C0->sext(ExtendedWidth) * C1->sext(ExtendedWidth)).ashr(Scale);
if (IntrinsicID == Intrinsic::smul_fix_sat) {
- APInt Max = APInt::getSignedMaxValue(Width).sextOrSelf(ExtendedWidth);
- APInt Min = APInt::getSignedMinValue(Width).sextOrSelf(ExtendedWidth);
+ APInt Max = APInt::getSignedMaxValue(Width).sext(ExtendedWidth);
+ APInt Min = APInt::getSignedMinValue(Width).sext(ExtendedWidth);
Product = APIntOps::smin(Product, Max);
Product = APIntOps::smax(Product, Min);
}
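// Worked example with Width = 8 and Scale = 4 (Q4.4 fixed point),
// computing 1.5 * 2.25: C0 = 0x18, C1 = 0x24; after sext to 16 bits the
// product is 0x360, and ashr by the scale gives 0x36 = 54, i.e. 3.375 in
// Q4.4, which fits in Width bits so no saturation clamp triggers.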
ConstantRange CR = ConstantRange::makeExactICmpRegion(EdgePred, *C);
if (!CR.isEmptySet())
return ValueLatticeElement::getRange(ConstantRange::getNonEmpty(
- CR.getUnsignedMin().zextOrSelf(BitWidth), APInt(BitWidth, 0)));
+ CR.getUnsignedMin().zext(BitWidth), APInt(BitWidth, 0)));
}
return ValueLatticeElement::getOverdefined();
if (!Arg)
return None;
- APInt MaxSize = Arg->getValue().zextOrSelf(IntTyBits);
+ APInt MaxSize = Arg->getValue().zext(IntTyBits);
if (Size.ugt(MaxSize))
Size = MaxSize + 1;
}
static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) {
if (X.hasValue() && Y.hasValue()) {
unsigned W = std::max(X->getBitWidth(), Y->getBitWidth());
- APInt XW = X->sextOrSelf(W);
- APInt YW = Y->sextOrSelf(W);
+ APInt XW = X->sext(W);
+ APInt YW = Y->sext(W);
return XW.slt(YW) ? *X : *Y;
}
if (!X.hasValue() && !Y.hasValue())
std::tie(A, B, C, M, BitWidth) = *T;
// Lower bound is inclusive, subtract 1 to represent the exiting value.
- APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1;
- APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth());
+ APInt Lower = Range.getLower().sext(A.getBitWidth()) - 1;
+ APInt Upper = Range.getUpper().sext(A.getBitWidth());
auto SL = SolveForBoundary(Lower);
auto SU = SolveForBoundary(Upper);
// If any of the solutions was unknown, no meaningful conclusions can
assert(Size % 8 == 0);
// Extend the element to take zero padding into account.
- APInt Value = CI->getValue().zextOrSelf(Size);
+ APInt Value = CI->getValue().zext(Size);
if (!Value.isSplat(8))
return -1;
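// E.g. a 16-bit constant 0xABAB zero-padded to a 24-bit store size becomes
// 0x00ABAB, whose bytes differ, so isSplat(8) correctly rejects it; only a
// value that is uniform per byte after padding survives.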
unsigned NumBits = Ty.getScalarSizeInBits();
auto ValVRegAndVal = getIConstantVRegValWithLookThrough(Val, MRI);
if (!Ty.isVector() && ValVRegAndVal) {
- APInt Scalar = ValVRegAndVal->Value.truncOrSelf(8);
+ APInt Scalar = ValVRegAndVal->Value.trunc(8);
APInt SplatVal = APInt::getSplat(NumBits, Scalar);
return MIB.buildConstant(Ty, SplatVal).getReg(0);
}
// We provide an Offset so that we can create bitwidths that won't overflow.
static void zeroExtendToMatch(APInt &LHS, APInt &RHS, unsigned Offset = 0) {
unsigned Bits = Offset + std::max(LHS.getBitWidth(), RHS.getBitWidth());
- LHS = LHS.zextOrSelf(Bits);
- RHS = RHS.zextOrSelf(Bits);
+ LHS = LHS.zext(Bits);
+ RHS = RHS.zext(Bits);
}
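// Usage sketch with illustrative values: with two i8 operands and
// Offset = 1, both become 9 bits wide, so e.g. adding 200 and 100 yields
// 300 without wrapping; Offset buys the headroom the caller's subsequent
// arithmetic needs.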
// Return true if this node is a setcc, or is a select_cc
return 0;
const APInt &C1 = N1C->getAPIntValue();
const APInt &C2 = N3C->getAPIntValue();
- if (C1.getBitWidth() < C2.getBitWidth() ||
- C1 != C2.sextOrSelf(C1.getBitWidth()))
+ if (C1.getBitWidth() < C2.getBitWidth() || C1 != C2.sext(C1.getBitWidth()))
return 0;
return CC == ISD::SETLT ? ISD::SMIN : (CC == ISD::SETGT ? ISD::SMAX : 0);
};
const APInt &C1 = N1C->getAPIntValue();
const APInt &C3 = N3C->getAPIntValue();
if (!(C1 + 1).isPowerOf2() || C1.getBitWidth() < C3.getBitWidth() ||
- C1 != C3.zextOrSelf(C1.getBitWidth()))
+ C1 != C3.zext(C1.getBitWidth()))
return SDValue();
unsigned BW = (C1 + 1).exactLogBase2();
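// For instance, C1 == C3 == 255 passes the checks above, and
// (C1 + 1).exactLogBase2() gives BW = 8; 255 is the largest value an
// unsigned 8-bit quantity can hold, which is the bound being matched.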
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
APInt Val;
if (TLI->signExtendConstant(CI))
- Val = CI->getValue().sextOrSelf(BitWidth);
+ Val = CI->getValue().sext(BitWidth);
else
- Val = CI->getValue().zextOrSelf(BitWidth);
+ Val = CI->getValue().zext(BitWidth);
DestLOI.NumSignBits = Val.getNumSignBits();
DestLOI.Known = KnownBits::makeConstant(Val);
} else {
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
APInt Val;
if (TLI->signExtendConstant(CI))
- Val = CI->getValue().sextOrSelf(BitWidth);
+ Val = CI->getValue().sext(BitWidth);
else
- Val = CI->getValue().zextOrSelf(BitWidth);
+ Val = CI->getValue().zext(BitWidth);
DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, Val.getNumSignBits());
DestLOI.Known.Zero &= ~Val;
DestLOI.Known.One &= Val;
EVT VT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
APInt MulImm = cast<ConstantSDNode>(N->getOperand(0))->getAPIntValue();
- return DAG.getVScale(SDLoc(N), VT, MulImm.sextOrSelf(VT.getSizeInBits()));
+ return DAG.getVScale(SDLoc(N), VT, MulImm.sext(VT.getSizeInBits()));
}
SDValue DAGTypeLegalizer::PromoteIntRes_VAARG(SDNode *N) {
unsigned EltSize =
N->getValueType(0).getVectorElementType().getSizeInBits();
if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
- SplatVal = Op0->getAPIntValue().truncOrSelf(EltSize);
+ SplatVal = Op0->getAPIntValue().trunc(EltSize);
return true;
}
if (auto *Op0 = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) {
- SplatVal = Op0->getValueAPF().bitcastToAPInt().truncOrSelf(EltSize);
+ SplatVal = Op0->getValueAPF().bitcastToAPInt().trunc(EltSize);
return true;
}
}
uint64_t Idx = V.getConstantOperandVal(1);
unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
APInt UndefSrcElts;
- APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
+ APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);
if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) {
UndefElts = UndefSrcElts.extractBits(NumElts, Idx);
return true;
return false;
unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
APInt UndefSrcElts;
- APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts);
+ APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts);
if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) {
- UndefElts = UndefSrcElts.truncOrSelf(NumElts);
+ UndefElts = UndefSrcElts.trunc(NumElts);
return true;
}
break;
break;
uint64_t Idx = Op.getConstantOperandVal(1);
unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
- APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
+ APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);
Known = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
break;
}
}
case ISD::ZERO_EXTEND_VECTOR_INREG: {
EVT InVT = Op.getOperand(0).getValueType();
- APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
+ APInt InDemandedElts = DemandedElts.zext(InVT.getVectorNumElements());
Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
Known = Known.zext(BitWidth);
break;
}
case ISD::SIGN_EXTEND_VECTOR_INREG: {
EVT InVT = Op.getOperand(0).getValueType();
- APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
+ APInt InDemandedElts = DemandedElts.zext(InVT.getVectorNumElements());
Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
// If the sign bit is known to be zero or one, then sext will extend
// it to the top bits, else it will just zext.
}
case ISD::ANY_EXTEND_VECTOR_INREG: {
EVT InVT = Op.getOperand(0).getValueType();
- APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
+ APInt InDemandedElts = DemandedElts.zext(InVT.getVectorNumElements());
Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
Known = Known.anyext(BitWidth);
break;
case ISD::SIGN_EXTEND_VECTOR_INREG: {
SDValue Src = Op.getOperand(0);
EVT SrcVT = Src.getValueType();
- APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements());
+ APInt DemandedSrcElts = DemandedElts.zext(SrcVT.getVectorNumElements());
Tmp = VTBits - SrcVT.getScalarSizeInBits();
return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp;
}
break;
uint64_t Idx = Op.getConstantOperandVal(1);
unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
- APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
+ APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);
return ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
}
case ISD::CONCAT_VECTORS: {
for (unsigned I = 0, E = DstBits.size(); I != E; ++I) {
if (DstUndefs[I])
continue;
- Ops[I] = getConstant(DstBits[I].sextOrSelf(BVEltBits), DL, BVEltVT);
+ Ops[I] = getConstant(DstBits[I].sext(BVEltBits), DL, BVEltVT);
}
return getBitcast(VT, getBuildVector(BVVT, DL, Ops));
}
auto *CInt = dyn_cast<ConstantSDNode>(Op);
auto *CFP = dyn_cast<ConstantFPSDNode>(Op);
assert((CInt || CFP) && "Unknown constant");
- SrcBitElements[I] =
- CInt ? CInt->getAPIntValue().truncOrSelf(SrcEltSizeInBits)
- : CFP->getValueAPF().bitcastToAPInt();
+ SrcBitElements[I] = CInt ? CInt->getAPIntValue().trunc(SrcEltSizeInBits)
+ : CFP->getValueAPF().bitcastToAPInt();
}
// Recast to dst width.
KnownBits SrcKnown;
SDValue Src = Op.getOperand(0);
unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
- APInt SrcDemandedBits = DemandedBits.zextOrSelf(SrcBitWidth);
+ APInt SrcDemandedBits = DemandedBits.zext(SrcBitWidth);
if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcKnown, TLO, Depth + 1))
return true;
break;
uint64_t Idx = Op.getConstantOperandVal(1);
unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
- APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
+ APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);
if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, Known, TLO,
Depth + 1))
}
APInt InDemandedBits = DemandedBits.trunc(InBits);
- APInt InDemandedElts = DemandedElts.zextOrSelf(InElts);
+ APInt InDemandedElts = DemandedElts.zext(InElts);
if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
Depth + 1))
return true;
}
APInt InDemandedBits = DemandedBits.trunc(InBits);
- APInt InDemandedElts = DemandedElts.zextOrSelf(InElts);
+ APInt InDemandedElts = DemandedElts.zext(InElts);
// Since some of the sign extended bits are demanded, we know that the sign
// bit is demanded.
return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src));
APInt InDemandedBits = DemandedBits.trunc(InBits);
- APInt InDemandedElts = DemandedElts.zextOrSelf(InElts);
+ APInt InDemandedElts = DemandedElts.zext(InElts);
if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
Depth + 1))
return true;
break;
uint64_t Idx = Op.getConstantOperandVal(1);
unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
- APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
+ APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);
APInt SrcUndef, SrcZero;
if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
APInt SrcUndef, SrcZero;
SDValue Src = Op.getOperand(0);
unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
- APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts);
+ APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts);
if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
Depth + 1))
return true;
// floating-point values.
APInt MinInt, MaxInt;
if (IsSigned) {
- MinInt = APInt::getSignedMinValue(SatWidth).sextOrSelf(DstWidth);
- MaxInt = APInt::getSignedMaxValue(SatWidth).sextOrSelf(DstWidth);
+ MinInt = APInt::getSignedMinValue(SatWidth).sext(DstWidth);
+ MaxInt = APInt::getSignedMaxValue(SatWidth).sext(DstWidth);
} else {
- MinInt = APInt::getMinValue(SatWidth).zextOrSelf(DstWidth);
- MaxInt = APInt::getMaxValue(SatWidth).zextOrSelf(DstWidth);
+ MinInt = APInt::getMinValue(SatWidth).zext(DstWidth);
+ MaxInt = APInt::getMaxValue(SatWidth).zext(DstWidth);
}
// We cannot risk emitting FP_TO_XINT nodes with a source VT of f16, as
case Instruction::UIToFP: {
// TODO: use input range if available
auto BW = getBitWidth();
- APInt Min = APInt::getMinValue(BW).zextOrSelf(ResultBitWidth);
- APInt Max = APInt::getMaxValue(BW).zextOrSelf(ResultBitWidth);
+ APInt Min = APInt::getMinValue(BW);
+ APInt Max = APInt::getMaxValue(BW);
+ if (ResultBitWidth > BW) {
+ Min = Min.zext(ResultBitWidth);
+ Max = Max.zext(ResultBitWidth);
+ }
return ConstantRange(std::move(Min), std::move(Max));
}
case Instruction::SIToFP: {
// TODO: use input range if available
auto BW = getBitWidth();
- APInt SMin = APInt::getSignedMinValue(BW).sextOrSelf(ResultBitWidth);
- APInt SMax = APInt::getSignedMaxValue(BW).sextOrSelf(ResultBitWidth);
+ APInt SMin = APInt::getSignedMinValue(BW);
+ APInt SMax = APInt::getSignedMaxValue(BW);
+ if (ResultBitWidth > BW) {
+ SMin = SMin.sext(ResultBitWidth);
+ SMax = SMax.sext(ResultBitWidth);
+ }
return ConstantRange(std::move(SMin), std::move(SMax));
}
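// E.g. for an i8 input modeled at a 32-bit result width, SMin becomes
// sext(-128) == 0xFFFFFF80 and SMax becomes sext(127) == 0x0000007F; when
// ResultBitWidth <= BW the signed extremes are used at their native width.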
case Instruction::FPTrunc:
// Widen the LHS and RHS so we can perform a full multiplication.
unsigned Wide = CommonFXSema.getWidth() * 2;
if (CommonFXSema.isSigned()) {
- ThisVal = ThisVal.sextOrSelf(Wide);
- OtherVal = OtherVal.sextOrSelf(Wide);
+ ThisVal = ThisVal.sext(Wide);
+ OtherVal = OtherVal.sext(Wide);
} else {
- ThisVal = ThisVal.zextOrSelf(Wide);
- OtherVal = OtherVal.zextOrSelf(Wide);
+ ThisVal = ThisVal.zext(Wide);
+ OtherVal = OtherVal.zext(Wide);
}
// Perform the full multiplication and downscale to get the same scale.
// Widen the LHS and RHS so we can perform a full division.
unsigned Wide = CommonFXSema.getWidth() * 2;
if (CommonFXSema.isSigned()) {
- ThisVal = ThisVal.sextOrSelf(Wide);
- OtherVal = OtherVal.sextOrSelf(Wide);
+ ThisVal = ThisVal.sext(Wide);
+ OtherVal = OtherVal.sext(Wide);
} else {
- ThisVal = ThisVal.zextOrSelf(Wide);
- OtherVal = OtherVal.zextOrSelf(Wide);
+ ThisVal = ThisVal.zext(Wide);
+ OtherVal = OtherVal.zext(Wide);
}
// Upscale to compensate for the loss of precision from division, and
// Widen the LHS.
unsigned Wide = Sema.getWidth() * 2;
if (Sema.isSigned())
- ThisVal = ThisVal.sextOrSelf(Wide);
+ ThisVal = ThisVal.sext(Wide);
else
- ThisVal = ThisVal.zextOrSelf(Wide);
+ ThisVal = ThisVal.zext(Wide);
// Clamp the shift amount at the original width, and perform the shift.
Amt = std::min(Amt, ThisVal.getBitWidth());
/// In the slow case, we know the result is large.
APInt APInt::concatSlowCase(const APInt &NewLSB) const {
unsigned NewWidth = getBitWidth() + NewLSB.getBitWidth();
- APInt Result = NewLSB.zextOrSelf(NewWidth);
+ APInt Result = NewLSB.zext(NewWidth);
Result.insertBits(*this, NewLSB.getBitWidth());
return Result;
}
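// Example: concatenating an i8 0xAB (as the new MSBs) onto an i4 0xC
// yields the 12-bit value 0xABC; the LSB operand is zero-extended to the
// combined width and the high bits are inserted above it.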
APInt APInt::getSplat(unsigned NewLen, const APInt &V) {
assert(NewLen >= V.getBitWidth() && "Can't splat to smaller bit width!");
- APInt Val = V.zextOrSelf(NewLen);
+ APInt Val = V.zext(NewLen);
for (unsigned I = V.getBitWidth(); I < NewLen; I <<= 1)
Val |= Val << I;
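// Example: getSplat(32, APInt(8, 0xAB)) doubles the pattern through the
// shift-or loop: 0xAB -> 0xABAB -> 0xABABABAB.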
SDLoc DL(N);
uint64_t Val = cast<ConstantSDNode>(N)
->getAPIntValue()
- .truncOrSelf(VT.getFixedSizeInBits())
+ .trunc(VT.getFixedSizeInBits())
.getZExtValue();
switch (VT.SimpleTy) {
SDLoc DL(N);
int64_t Val = cast<ConstantSDNode>(N)
->getAPIntValue()
- .truncOrSelf(VT.getFixedSizeInBits())
+ .trunc(VT.getFixedSizeInBits())
.getSExtValue();
switch (VT.SimpleTy) {
SDValue Sat;
if (Op.getOpcode() == ISD::FP_TO_SINT_SAT) {
SDValue MinC = DAG.getConstant(
- APInt::getSignedMaxValue(SatWidth).sextOrSelf(SrcElementWidth), DL,
- IntVT);
+ APInt::getSignedMaxValue(SatWidth).sext(SrcElementWidth), DL, IntVT);
SDValue Min = DAG.getNode(ISD::SMIN, DL, IntVT, NativeCvt, MinC);
SDValue MaxC = DAG.getConstant(
- APInt::getSignedMinValue(SatWidth).sextOrSelf(SrcElementWidth), DL,
- IntVT);
+ APInt::getSignedMinValue(SatWidth).sext(SrcElementWidth), DL, IntVT);
Sat = DAG.getNode(ISD::SMAX, DL, IntVT, Min, MaxC);
} else {
SDValue MinC = DAG.getConstant(
- APInt::getAllOnesValue(SatWidth).zextOrSelf(SrcElementWidth), DL,
- IntVT);
+ APInt::getAllOnesValue(SatWidth).zext(SrcElementWidth), DL, IntVT);
Sat = DAG.getNode(ISD::UMIN, DL, IntVT, NativeCvt, MinC);
}
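// E.g. saturating to an 8-bit signed result inside a wider native
// conversion: SMIN with 127 followed by SMAX with -128 clamps the
// converted value into [-128, 127].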
SDValue Sat;
if (Op.getOpcode() == ISD::FP_TO_SINT_SAT) {
SDValue MinC = DAG.getConstant(
- APInt::getSignedMaxValue(SatWidth).sextOrSelf(DstWidth), DL, DstVT);
+ APInt::getSignedMaxValue(SatWidth).sext(DstWidth), DL, DstVT);
SDValue Min = DAG.getNode(ISD::SMIN, DL, DstVT, NativeCvt, MinC);
SDValue MaxC = DAG.getConstant(
- APInt::getSignedMinValue(SatWidth).sextOrSelf(DstWidth), DL, DstVT);
+ APInt::getSignedMinValue(SatWidth).sext(DstWidth), DL, DstVT);
Sat = DAG.getNode(ISD::SMAX, DL, DstVT, Min, MaxC);
} else {
SDValue MinC = DAG.getConstant(
- APInt::getAllOnesValue(SatWidth).zextOrSelf(DstWidth), DL, DstVT);
+ APInt::getAllOnesValue(SatWidth).zext(DstWidth), DL, DstVT);
Sat = DAG.getNode(ISD::UMIN, DL, DstVT, NativeCvt, MinC);
}
SDLoc DL(Op);
APInt MulImm = cast<ConstantSDNode>(Op.getOperand(0))->getAPIntValue();
- return DAG.getZExtOrTrunc(DAG.getVScale(DL, MVT::i64, MulImm.sextOrSelf(64)),
- DL, VT);
+ return DAG.getZExtOrTrunc(DAG.getVScale(DL, MVT::i64, MulImm.sext(64)), DL,
+ VT);
}
/// Set the IntrinsicInfo for the `aarch64_sve_st<N>` intrinsics.
if (!Const)
return false;
- const APInt ConstValue = Const->Value.sextOrSelf(Ty.getSizeInBits());
+ APInt ConstValue = Const->Value.sext(Ty.getSizeInBits());
// The following code is ported from AArch64ISelLowering.
// Multiplication of a power of two plus/minus one can be done more
// cheaply as a shift+add/sub. For now, this is true unilaterally. If
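// E.g. x * 9 == (x << 3) + x and x * 7 == (x << 3) - x, each one shift
// plus one add/sub instead of a multiply.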
// Try to avoid emitting a bit operation when we only need to touch half of
// the 64-bit pointer.
- APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zextOrSelf(64);
+ APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zext(64);
const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
unsigned W2 = A2.getBitWidth();
unsigned MaxW = (W1 >= W2) ? W1 : W2;
if (Cmp & Comparison::U) {
- const APInt Zx1 = A1.zextOrSelf(MaxW);
- const APInt Zx2 = A2.zextOrSelf(MaxW);
+ APInt Zx1 = A1.zext(MaxW);
+ APInt Zx2 = A2.zext(MaxW);
if (Cmp & Comparison::L)
Result = Zx1.ult(Zx2);
else if (Cmp & Comparison::G)
}
// Signed comparison.
- const APInt Sx1 = A1.sextOrSelf(MaxW);
- const APInt Sx2 = A2.sextOrSelf(MaxW);
+ APInt Sx1 = A1.sext(MaxW);
+ APInt Sx2 = A2.sext(MaxW);
if (Cmp & Comparison::L)
Result = Sx1.slt(Sx2);
else if (Cmp & Comparison::G)
unsigned Count, APInt &Result) {
assert(Count > 0);
unsigned BW = A1.getBitWidth(), SW = Count*Bits;
- APInt LoBits = (Bits < BW) ? A1.trunc(Bits) : A1.zextOrSelf(Bits);
+ APInt LoBits = (Bits < BW) ? A1.trunc(Bits) : A1.zext(Bits);
if (Count > 1)
LoBits = LoBits.zext(SW);
}
for (unsigned i = 0; i < HiVs.size(); ++i) {
- APInt HV = HiVs[i].zextOrSelf(64) << 32;
+ APInt HV = HiVs[i].zext(64) << 32;
for (unsigned j = 0; j < LoVs.size(); ++j) {
- APInt LV = LoVs[j].zextOrSelf(64);
+ APInt LV = LoVs[j].zext(64);
const Constant *C = intToConst(HV | LV);
Result.add(C);
if (Result.isBottom())
break;
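// E.g. HiVs = {1} and LoVs = {2} produce the single 64-bit constant
// (1 << 32) | 2 == 0x100000002.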
SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
- APInt SignBit = APInt::getSignMask(FPBits).sextOrSelf(VT.getSizeInBits());
+ APInt SignBit = APInt::getSignMask(FPBits).sext(VT.getSizeInBits());
if (Op0.getOpcode() == ISD::FNEG)
return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
DAG.getConstant(SignBit, DL, VT));
// floating-point values.
APInt MinInt, MaxInt;
if (IsSigned) {
- MinInt = APInt::getSignedMinValue(SatWidth).sextOrSelf(DstWidth);
- MaxInt = APInt::getSignedMaxValue(SatWidth).sextOrSelf(DstWidth);
+ MinInt = APInt::getSignedMinValue(SatWidth).sext(DstWidth);
+ MaxInt = APInt::getSignedMaxValue(SatWidth).sext(DstWidth);
} else {
- MinInt = APInt::getMinValue(SatWidth).zextOrSelf(DstWidth);
- MaxInt = APInt::getMaxValue(SatWidth).zextOrSelf(DstWidth);
+ MinInt = APInt::getMinValue(SatWidth).zext(DstWidth);
+ MaxInt = APInt::getMaxValue(SatWidth).zext(DstWidth);
}
APFloat MinFloat(DAG.EVTToAPFloatSemantics(SrcVT));
TLO, Depth + 1))
return true;
- Known.Zero = KnownZero.zextOrSelf(BitWidth);
+ Known.Zero = KnownZero.zext(BitWidth);
Known.Zero.setHighBits(BitWidth - NumElts);
// MOVMSK only uses the MSB from each vector element.
uint64_t Idx = CIdx->getZExtValue();
if (UndefVecElts[Idx])
return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
- return DAG.getConstant(EltBits[Idx].zextOrSelf(VT.getScalarSizeInBits()),
- dl, VT);
+ return DAG.getConstant(EltBits[Idx].zext(VT.getScalarSizeInBits()), dl,
+ VT);
}
}
assert(CostValue >= 0 && "Negative cost!");
unsigned Num128Lanes = SizeInBits / 128 * CostValue;
unsigned NumElts = LT.second.getVectorNumElements() * CostValue;
- APInt WidenedDemandedElts = DemandedElts.zextOrSelf(NumElts);
+ APInt WidenedDemandedElts = DemandedElts.zext(NumElts);
unsigned Scale = NumElts / Num128Lanes;
// We iterate over each 128-bit lane and check whether we need an
// extracti128/inserti128 for that lane.
// if all elements that will form a single Dst vector aren't demanded,
// then we won't need to do that shuffle, so adjust the cost accordingly.
APInt DemandedDstVectors = APIntOps::ScaleBitMask(
- DemandedDstElts.zextOrSelf(NumDstVectors * NumEltsPerDstVec),
- NumDstVectors);
+ DemandedDstElts.zext(NumDstVectors * NumEltsPerDstVec), NumDstVectors);
unsigned NumDstVectorsDemanded = DemandedDstVectors.countPopulation();
InstructionCost SingleShuffleCost =
uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
// Make sure that, even if the multiplication below would wrap as an
// uint64_t, we still do the right thing.
- if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
+ if ((CS->getValue().zext(128) * APInt(128, TypeSize)).ugt(MaxSize))
return false;
continue;
}
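// Worked example of why 128 bits are needed: a count of 2^40 elements of
// 2^30 bytes multiplies to 2^70, which wraps to 64 as a uint64_t and would
// falsely pass a 64-bit MaxSize check, but compares correctly at 128 bits.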
// sdiv/srem is UB if divisor is -1 and dividend is INT_MIN, so unless we can
// prove that such a combination is impossible, we need to bump the bitwidth.
if (CRs[1]->contains(APInt::getAllOnes(OrigWidth)) &&
- CRs[0]->contains(
- APInt::getSignedMinValue(MinSignedBits).sextOrSelf(OrigWidth)))
+ CRs[0]->contains(APInt::getSignedMinValue(MinSignedBits).sext(OrigWidth)))
++MinSignedBits;
// Don't shrink below 8 bits wide.
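// E.g. shrinking an i64 sdiv to i8 is unsound if the dividend can be -128
// and the divisor -1: the quotient 128 is unrepresentable at 8 bits, while
// at 9 bits (MinSignedBits + 1) it fits.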
if (PtrDelta.urem(Stride) != 0)
return false;
unsigned IdxBitWidth = OpA->getType()->getScalarSizeInBits();
- APInt IdxDiff = PtrDelta.udiv(Stride).zextOrSelf(IdxBitWidth);
+ APInt IdxDiff = PtrDelta.udiv(Stride).zext(IdxBitWidth);
// Only look through a ZExt/SExt.
if (!isa<SExtInst>(OpA) && !isa<ZExtInst>(OpA))
// CHECK: UINT64_C(46848), // FOO32
// CHECK-LABEL: case ::FOO16: {
-// CHECK: Scratch = Scratch.zextOrSelf(41);
+// CHECK: Scratch = Scratch.zext(41);
// src.reg
// CHECK: getMachineOpValue(MI, MI.getOperand(1), /*Pos=*/0, Scratch, Fixups, STI);
// CHECK: Inst.insertBits(Scratch.extractBits(8, 0), 0);
// CHECK: Inst.insertBits(Scratch.extractBits(2, 0), 39);
// CHECK-LABEL: case ::FOO32: {
-// CHECK: Scratch = Scratch.zextOrSelf(57);
+// CHECK: Scratch = Scratch.zext(57);
// src.reg
// CHECK: getMachineOpValue(MI, MI.getOperand(1), /*Pos=*/0, Scratch, Fixups, STI);
// CHECK: Inst.insertBits(Scratch.extractBits(8, 0), 0);
raw_string_ostream SS(Case);
// Resize the scratch buffer.
if (BitWidth && !VLI.isFixedValueOnly())
- SS.indent(6) << "Scratch = Scratch.zextOrSelf(" << BitWidth << ");\n";
+ SS.indent(6) << "Scratch = Scratch.zext(" << BitWidth << ");\n";
// Populate the base value.
SS.indent(6) << "Inst = getInstBits(opcode);\n";
else
T = Builder.getIntNTy(BitWidth);
- APValue = APValue.sextOrSelf(T->getBitWidth());
+ APValue = APValue.sext(T->getBitWidth());
V = ConstantInt::get(T, APValue);
isl_ast_expr_free(Expr);