/// base size.
TypeSize getStoreSize() const {
TypeSize BaseSize = getSizeInBits();
- return {(BaseSize.getKnownMinSize() + 7) / 8, BaseSize.isScalable()};
+ return {(BaseSize.getKnownMinValue() + 7) / 8, BaseSize.isScalable()};
}
// Return the number of bytes overwritten by a store of this value type or
/// For example, returns 5 for i36 and 10 for x86_fp80.
TypeSize getTypeStoreSize(Type *Ty) const {
TypeSize BaseSize = getTypeSizeInBits(Ty);
- return {divideCeil(BaseSize.getKnownMinSize(), 8), BaseSize.isScalable()};
+ return {divideCeil(BaseSize.getKnownMinValue(), 8), BaseSize.isScalable()};
}
/// Returns the maximum number of bits that may be overwritten by
if (GEP->getNumIndices() == 1 && isa<ScalableVectorType>(DerefTy) &&
m_Zero().match(GEP->getPointerOperand()) &&
m_SpecificInt(1).match(GEP->idx_begin()->get()) &&
- DL.getTypeAllocSizeInBits(DerefTy).getKnownMinSize() == 8)
+ DL.getTypeAllocSizeInBits(DerefTy).getKnownMinValue() == 8)
return true;
}
}
/// needed to represent the size in bits. Must only be called on sized types.
constexpr TypeSize getSizeInBytes() const {
TypeSize BaseSize = getSizeInBits();
- return {(BaseSize.getKnownMinSize() + 7) / 8, BaseSize.isScalable()};
+ return {(BaseSize.getKnownMinValue() + 7) / 8, BaseSize.isScalable()};
}
constexpr LLT getScalarType() const {
/// base size.
TypeSize getStoreSize() const {
TypeSize BaseSize = getSizeInBits();
- return {(BaseSize.getKnownMinSize() + 7) / 8, BaseSize.isScalable()};
+ return {(BaseSize.getKnownMinValue() + 7) / 8, BaseSize.isScalable()};
}
// Return the number of bytes overwritten by a store of this value type or
Type *Ty = I.getAllocatedType();
AllocatedSize = SaturatingMultiplyAdd(
AllocSize->getLimitedValue(),
- DL.getTypeAllocSize(Ty).getKnownMinSize(), AllocatedSize);
+ DL.getTypeAllocSize(Ty).getKnownMinValue(), AllocatedSize);
if (AllocatedSize > InlineConstants::MaxSimplifiedDynamicAllocaToInline)
HasDynamicAlloca = true;
return false;
// Accumulate the allocated size.
if (I.isStaticAlloca()) {
Type *Ty = I.getAllocatedType();
- AllocatedSize =
- SaturatingAdd(DL.getTypeAllocSize(Ty).getKnownMinSize(), AllocatedSize);
+ AllocatedSize = SaturatingAdd(DL.getTypeAllocSize(Ty).getKnownMinValue(),
+ AllocatedSize);
}
// FIXME: This is overly conservative. Dynamic allocas are inefficient for
TypeSize ElemSize = DL.getTypeAllocSize(I.getAllocatedType());
if (ElemSize.isScalable() && Options.EvalMode != ObjectSizeOpts::Mode::Min)
return unknown();
- APInt Size(IntTyBits, ElemSize.getKnownMinSize());
+ APInt Size(IntTyBits, ElemSize.getKnownMinValue());
if (!I.isArrayAllocation())
return std::make_pair(align(Size, I.getAlign()), Zero);
KnownBits IndexBits(IndexBitWidth);
computeKnownBits(Index, IndexBits, Depth + 1, Q);
TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
- uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinSize();
+ uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinValue();
KnownBits ScalingFactor(IndexBitWidth);
// Multiply by current sizeof type.
// &A[i] == A + i * sizeof(*A[i]).
// to benefit from cheap constant propagation.
Type *ScalableVectorTy =
VectorType::get(Type::getInt8Ty(II->getContext()), 1, true);
- if (DL->getTypeAllocSize(ScalableVectorTy).getKnownMinSize() == 8) {
+ if (DL->getTypeAllocSize(ScalableVectorTy).getKnownMinValue() == 8) {
auto *Null = Constant::getNullValue(ScalableVectorTy->getPointerTo());
auto *One = ConstantInt::getSigned(II->getType(), 1);
auto *CGep =
for (MVT VT : MVT::all_valuetypes())
if (EVT(VT).isSimple() && VT != MVT::Other &&
TLI.isTypeLegal(EVT(VT)) &&
- VT.getSizeInBits().getKnownMinSize() >= MaximumLegalStoreInBits)
- MaximumLegalStoreInBits = VT.getSizeInBits().getKnownMinSize();
+ VT.getSizeInBits().getKnownMinValue() >= MaximumLegalStoreInBits)
+ MaximumLegalStoreInBits = VT.getSizeInBits().getKnownMinValue();
}
void ConsiderForPruning(SDNode *N) {
(TFI->isStackRealignable() || (Alignment <= StackAlign))) {
const ConstantInt *CUI = cast<ConstantInt>(AI->getArraySize());
uint64_t TySize =
- MF->getDataLayout().getTypeAllocSize(Ty).getKnownMinSize();
+ MF->getDataLayout().getTypeAllocSize(Ty).getKnownMinValue();
TySize *= CUI->getZExtValue(); // Get total allocated size.
if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
Value = Result;
Chain = Ch;
- } else if (!isPowerOf2_64(SrcWidth.getKnownMinSize())) {
+ } else if (!isPowerOf2_64(SrcWidth.getKnownMinValue())) {
// If not loading a power-of-2 number of bits, expand as two loads.
assert(!SrcVT.isVector() && "Unsupported extload!");
unsigned SrcWidthBits = SrcWidth.getFixedSize();
MachinePointerInfo &MPI, SDValue &Ptr,
uint64_t *ScaledOffset) {
SDLoc DL(N);
- unsigned IncrementSize = MemVT.getSizeInBits().getKnownMinSize() / 8;
+ unsigned IncrementSize = MemVT.getSizeInBits().getKnownMinValue() / 8;
if (MemVT.isScalableVector()) {
SDNodeFlags Flags;
Align Alignment = SLD->getOriginalAlign();
if (LoMemVT.isScalableVector())
Alignment = commonAlignment(
- Alignment, LoMemVT.getSizeInBits().getKnownMinSize() / 8);
+ Alignment, LoMemVT.getSizeInBits().getKnownMinValue() / 8);
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
MachinePointerInfo(SLD->getPointerInfo().getAddrSpace()),
MachinePointerInfo MPI;
if (LoMemVT.isScalableVector()) {
Alignment = commonAlignment(Alignment,
- LoMemVT.getSizeInBits().getKnownMinSize() / 8);
+ LoMemVT.getSizeInBits().getKnownMinValue() / 8);
MPI = MachinePointerInfo(N->getPointerInfo().getAddrSpace());
} else
MPI = N->getPointerInfo().getWithOffset(
Align Alignment = N->getOriginalAlign();
if (LoMemVT.isScalableVector())
Alignment = commonAlignment(Alignment,
- LoMemVT.getSizeInBits().getKnownMinSize() / 8);
+ LoMemVT.getSizeInBits().getKnownMinValue() / 8);
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
MachinePointerInfo(N->getPointerInfo().getAddrSpace()),
MachinePointerInfo MPI;
if (LoMemVT.isScalableVector()) {
Alignment = commonAlignment(
- Alignment, LoMemVT.getSizeInBits().getKnownMinSize() / 8);
+ Alignment, LoMemVT.getSizeInBits().getKnownMinValue() / 8);
MPI = MachinePointerInfo(N->getPointerInfo().getAddrSpace());
} else
MPI = N->getPointerInfo().getWithOffset(
unsigned WidenEx = 0) {
EVT WidenEltVT = WidenVT.getVectorElementType();
const bool Scalable = WidenVT.isScalableVector();
- unsigned WidenWidth = WidenVT.getSizeInBits().getKnownMinSize();
+ unsigned WidenWidth = WidenVT.getSizeInBits().getKnownMinValue();
unsigned WidenEltWidth = WidenEltVT.getSizeInBits();
unsigned AlignInBits = Align*8;
// Skip vector MVTs which don't match the scalable property of WidenVT.
if (Scalable != MemVT.isScalableVector())
continue;
- unsigned MemVTWidth = MemVT.getSizeInBits().getKnownMinSize();
+ unsigned MemVTWidth = MemVT.getSizeInBits().getKnownMinValue();
auto Action = TLI.getTypeAction(*DAG.getContext(), MemVT);
if ((Action == TargetLowering::TypeLegal ||
Action == TargetLowering::TypePromoteInteger) &&
// Find the vector type that can load from.
std::optional<EVT> FirstVT =
- findMemType(DAG, TLI, LdWidth.getKnownMinSize(), WidenVT, LdAlign,
- WidthDiff.getKnownMinSize());
+ findMemType(DAG, TLI, LdWidth.getKnownMinValue(), WidenVT, LdAlign,
+ WidthDiff.getKnownMinValue());
if (!FirstVT)
return SDValue();
RemainingWidth -= NewVTWidth;
if (TypeSize::isKnownLT(RemainingWidth, NewVTWidth)) {
// The current type we are using is too large. Find a better size.
- NewVT = findMemType(DAG, TLI, RemainingWidth.getKnownMinSize(), WidenVT,
- LdAlign, WidthDiff.getKnownMinSize());
+ NewVT = findMemType(DAG, TLI, RemainingWidth.getKnownMinValue(),
+ WidenVT, LdAlign, WidthDiff.getKnownMinValue());
if (!NewVT)
return SDValue();
NewVTWidth = NewVT->getSizeInBits();
TypeSize LdTySize = LdTy.getSizeInBits();
TypeSize NewLdTySize = NewLdTy.getSizeInBits();
assert(NewLdTySize.isScalable() == LdTySize.isScalable() &&
- NewLdTySize.isKnownMultipleOf(LdTySize.getKnownMinSize()));
+ NewLdTySize.isKnownMultipleOf(LdTySize.getKnownMinValue()));
unsigned NumOps =
- NewLdTySize.getKnownMinSize() / LdTySize.getKnownMinSize();
+ NewLdTySize.getKnownMinValue() / LdTySize.getKnownMinValue();
SmallVector<SDValue, 16> WidenOps(NumOps);
unsigned j = 0;
for (; j != End-Idx; ++j)
// We need to fill the rest with undefs to build the vector.
unsigned NumOps =
- WidenWidth.getKnownMinSize() / LdTy.getSizeInBits().getKnownMinSize();
+ WidenWidth.getKnownMinValue() / LdTy.getSizeInBits().getKnownMinValue();
SmallVector<SDValue, 16> WidenOps(NumOps);
SDValue UndefVal = DAG.getUNDEF(LdTy);
{
while (StWidth.isNonZero()) {
// Find the largest vector type we can store with.
std::optional<EVT> NewVT =
- findMemType(DAG, TLI, StWidth.getKnownMinSize(), ValVT);
+ findMemType(DAG, TLI, StWidth.getKnownMinValue(), ValVT);
if (!NewVT)
return false;
MemVTs.push_back({*NewVT, 0});
StackID = TFI->getStackIDForScalableVectors();
// The stack id gives an indication of whether the object is scalable or
// not, so it's safe to pass in the minimum size here.
- int FrameIdx = MFI.CreateStackObject(Bytes.getKnownMinSize(), Alignment,
+ int FrameIdx = MFI.CreateStackObject(Bytes.getKnownMinValue(), Alignment,
false, nullptr, StackID);
return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
}
assert(VT1Size.isScalable() == VT2Size.isScalable() &&
"Don't know how to choose the maximum size when creating a stack "
"temporary");
- TypeSize Bytes =
- VT1Size.getKnownMinSize() > VT2Size.getKnownMinSize() ? VT1Size : VT2Size;
+ TypeSize Bytes = VT1Size.getKnownMinValue() > VT2Size.getKnownMinValue()
+ ? VT1Size
+ : VT2Size;
Type *Ty1 = VT1.getTypeForEVT(*getContext());
Type *Ty2 = VT2.getTypeForEVT(*getContext());
if (Offset.isScalable())
Index = getVScale(DL, Base.getValueType(),
APInt(Base.getValueSizeInBits().getFixedSize(),
- Offset.getKnownMinSize()));
+ Offset.getKnownMinValue()));
else
Index = getConstant(Offset.getFixedSize(), DL, VT);
// the MMO. This is because the MMO might indicate only a possible address
// range instead of specifying the affected memory addresses precisely.
// TODO: Make MachineMemOperands aware of scalable vectors.
- assert(memvt.getStoreSize().getKnownMinSize() <= MMO->getSize() &&
+ assert(memvt.getStoreSize().getKnownMinValue() <= MMO->getSize() &&
"Size mismatch!");
}
DAG.getDataLayout().getTypeAllocSize(GTI.getIndexedType());
// We intentionally mask away the high bits here; ElementSize may not
// fit in IdxTy.
- APInt ElementMul(IdxSize, ElementSize.getKnownMinSize());
+ APInt ElementMul(IdxSize, ElementSize.getKnownMinValue());
bool ElementScalable = ElementSize.isScalable();
// If this is a scalar constant or a splat vector of constants,
ISD::OutputArg MyFlags(
Flags, Parts[j].getValueType().getSimpleVT(), VT,
i < CLI.NumFixedArgs, i,
- j * Parts[j].getValueType().getStoreSize().getKnownMinSize());
+ j * Parts[j].getValueType().getStoreSize().getKnownMinValue());
if (NumParts > 1 && j == 0)
MyFlags.Flags.setSplit();
else if (j != 0) {
// For scalable vectors, use the minimum size; individual targets
// are responsible for handling scalable vector arguments and
// return values.
- ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
- ArgNo, PartBase+i*RegisterVT.getStoreSize().getKnownMinSize());
+ ISD::InputArg MyFlags(
+ Flags, RegisterVT, VT, isArgValueUsed, ArgNo,
+ PartBase + i * RegisterVT.getStoreSize().getKnownMinValue());
if (NumRegs > 1 && i == 0)
MyFlags.Flags.setSplit();
// if it isn't first piece, alignment must be 1
}
if (NeedsRegBlock && Value == NumValues - 1)
Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
- PartBase += VT.getStoreSize().getKnownMinSize();
+ PartBase += VT.getStoreSize().getKnownMinValue();
}
}
} else if (DataVT.isScalableVector()) {
Increment = DAG.getVScale(DL, AddrVT,
APInt(AddrVT.getFixedSizeInBits(),
- DataVT.getStoreSize().getKnownMinSize()));
+ DataVT.getStoreSize().getKnownMinValue()));
} else
Increment = DAG.getConstant(DataVT.getStoreSize(), DL, AddrVT);
// Store the hi part of CONCAT_VECTORS(V1, V2)
SDValue OffsetToV2 = DAG.getVScale(
DL, PtrVT,
- APInt(PtrVT.getFixedSizeInBits(), VT.getStoreSize().getKnownMinSize()));
+ APInt(PtrVT.getFixedSizeInBits(), VT.getStoreSize().getKnownMinValue()));
SDValue StackPtr2 = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, OffsetToV2);
SDValue StoreV2 = DAG.getStore(StoreV1, DL, V2, StackPtr2, PtrInfo);
DAG.getConstant(TrailingElts * EltByteSize, DL, PtrVT);
if (TrailingElts > VT.getVectorMinNumElements()) {
- SDValue VLBytes = DAG.getVScale(
- DL, PtrVT,
- APInt(PtrVT.getFixedSizeInBits(), VT.getStoreSize().getKnownMinSize()));
+ SDValue VLBytes =
+ DAG.getVScale(DL, PtrVT,
+ APInt(PtrVT.getFixedSizeInBits(),
+ VT.getStoreSize().getKnownMinValue()));
TrailingBytes = DAG.getNode(ISD::UMIN, DL, PtrVT, TrailingBytes, VLBytes);
}
if (EVT(DestVT).bitsLT(NewVT)) { // Value is expanded, e.g. i64 -> i16.
TypeSize NewVTSize = NewVT.getSizeInBits();
// Convert sizes such as i33 to i64.
- if (!isPowerOf2_32(NewVTSize.getKnownMinSize()))
+ if (!isPowerOf2_32(NewVTSize.getKnownMinValue()))
NewVTSize = NewVTSize.coefficientNextPowerOf2();
return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
}
case Type::X86_MMXTyID:
case Type::FixedVectorTyID:
case Type::ScalableVectorTyID: {
- unsigned BitWidth = getTypeSizeInBits(Ty).getKnownMinSize();
+ unsigned BitWidth = getTypeSizeInBits(Ty).getKnownMinValue();
auto I = findAlignmentLowerBound(VECTOR_ALIGN, BitWidth);
if (I != Alignments.end() && I->AlignType == VECTOR_ALIGN &&
I->TypeBitWidth == BitWidth)
// We're only calculating a natural alignment, so it doesn't have to be
// based on the full size for scalable vectors. Using the minimum element
// count should be enough here.
- return Align(PowerOf2Ceil(getTypeStoreSize(Ty).getKnownMinSize()));
+ return Align(PowerOf2Ceil(getTypeStoreSize(Ty).getKnownMinValue()));
}
case Type::X86_AMXTyID:
return Align(64);
// Could still have vectors of pointers if the number of elements doesn't
// match
- if (SrcBits.getKnownMinSize() == 0 || DestBits.getKnownMinSize() == 0)
+ if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
return false;
if (SrcBits != DestBits)
if (Type *ArgMemTy = A->getPointeeInMemoryValueType()) {
if (ArgMemTy->isSized()) {
// FIXME: Why isn't this the type alloc size?
- DerefBytes = DL.getTypeStoreSize(ArgMemTy).getKnownMinSize();
+ DerefBytes = DL.getTypeStoreSize(ArgMemTy).getKnownMinValue();
}
}
}
} else if (auto *AI = dyn_cast<AllocaInst>(this)) {
if (!AI->isArrayAllocation()) {
DerefBytes =
- DL.getTypeStoreSize(AI->getAllocatedType()).getKnownMinSize();
+ DL.getTypeStoreSize(AI->getAllocatedType()).getKnownMinValue();
CanBeNull = false;
CanBeFreed = false;
}
// vector types larger than NEON don't have a matching SubRegIndex.
static SDNode *extractSubReg(SelectionDAG *DAG, EVT VT, SDValue V) {
assert(V.getValueType().isScalableVector() &&
- V.getValueType().getSizeInBits().getKnownMinSize() ==
+ V.getValueType().getSizeInBits().getKnownMinValue() ==
AArch64::SVEBitsPerBlock &&
"Expected to extract from a packed scalable vector!");
assert(VT.isFixedLengthVector() &&
// vector types larger than NEON don't have a matching SubRegIndex.
static SDNode *insertSubReg(SelectionDAG *DAG, EVT VT, SDValue V) {
assert(VT.isScalableVector() &&
- VT.getSizeInBits().getKnownMinSize() == AArch64::SVEBitsPerBlock &&
+ VT.getSizeInBits().getKnownMinValue() == AArch64::SVEBitsPerBlock &&
"Expected to insert into a packed scalable vector!");
assert(V.getValueType().isFixedLengthVector() &&
"Expected to insert a fixed length vector!");
return false;
TypeSize TS = MemVT.getSizeInBits();
- int64_t MemWidthBytes = static_cast<int64_t>(TS.getKnownMinSize()) / 8;
+ int64_t MemWidthBytes = static_cast<int64_t>(TS.getKnownMinValue()) / 8;
int64_t MulImm = cast<ConstantSDNode>(VScale.getOperand(0))->getSExtValue();
if ((MulImm % MemWidthBytes) != 0)
assert(VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
"Expected legal vector type!");
return VT.isFixedLengthVector() ||
- VT.getSizeInBits().getKnownMinSize() == AArch64::SVEBitsPerBlock;
+ VT.getSizeInBits().getKnownMinValue() == AArch64::SVEBitsPerBlock;
}
// Returns true for ####_MERGE_PASSTHRU opcodes, whose operands have a leading
(VA.getValVT().isScalableVector() || Subtarget->isWindowsArm64EC()) &&
"Indirect arguments should be scalable on most subtargets");
- uint64_t PartSize = VA.getValVT().getStoreSize().getKnownMinSize();
+ uint64_t PartSize = VA.getValVT().getStoreSize().getKnownMinValue();
unsigned NumParts = 1;
if (Ins[i].Flags.isInConsecutiveRegs()) {
assert(!Ins[i].Flags.isInConsecutiveRegsLast());
assert((isScalable || Subtarget->isWindowsArm64EC()) &&
"Indirect arguments should be scalable on most subtargets");
- uint64_t StoreSize = VA.getValVT().getStoreSize().getKnownMinSize();
+ uint64_t StoreSize = VA.getValVT().getStoreSize().getKnownMinValue();
uint64_t PartSize = StoreSize;
unsigned NumParts = 1;
if (Outs[i].Flags.isInConsecutiveRegs()) {
return SDValue();
// Current lowering only supports the SVE-ACLE types.
- if (VT.getSizeInBits().getKnownMinSize() != AArch64::SVEBitsPerBlock)
+ if (VT.getSizeInBits().getKnownMinValue() != AArch64::SVEBitsPerBlock)
return SDValue();
// The DUPQ operation is independent of element type so normalise to i64s.
assert(VT.isScalableVector() && "Expected a scalable vector.");
// Current lowering only supports the SVE-ACLE types.
- if (VT.getSizeInBits().getKnownMinSize() != AArch64::SVEBitsPerBlock)
+ if (VT.getSizeInBits().getKnownMinValue() != AArch64::SVEBitsPerBlock)
return SDValue();
unsigned ElemSize = VT.getVectorElementType().getSizeInBits() / 8;
- unsigned ByteSize = VT.getSizeInBits().getKnownMinSize() / 8;
+ unsigned ByteSize = VT.getSizeInBits().getKnownMinValue() / 8;
EVT ByteVT =
EVT::getVectorVT(Ctx, MVT::i8, ElementCount::getScalable(ByteSize));
SDLoc DL(N);
EVT VT = N->getValueType(0);
- if (VT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock)
+ if (VT.getSizeInBits().getKnownMinValue() > AArch64::SVEBitsPerBlock)
return SDValue();
EVT ContainerVT = VT;
MVT SrcElVT = SrcVT.getVectorElementType().getSimpleVT();
// Make sure that source data will fit into an SVE register
- if (SrcVT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock)
+ if (SrcVT.getSizeInBits().getKnownMinValue() > AArch64::SVEBitsPerBlock)
return SDValue();
// For FPs, ACLE only supports _packed_ single and double precision types.
SDLoc DL(N);
// Make sure that the loaded data will fit into an SVE register
- if (RetVT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock)
+ if (RetVT.getSizeInBits().getKnownMinValue() > AArch64::SVEBitsPerBlock)
return SDValue();
// Depending on the addressing mode, this is either a pointer or a vector of
const DataLayout &DL) const {
if (!Ty->isArrayTy()) {
const TypeSize &TySize = Ty->getPrimitiveSizeInBits();
- return TySize.isScalable() && TySize.getKnownMinSize() > 128;
+ return TySize.isScalable() && TySize.getKnownMinValue() > 128;
}
// All non aggregate members of the type must have the same type
// set to 1.
if (LdSt.getNumExplicitOperands() == 3) {
BaseOp = &LdSt.getOperand(1);
- Offset = LdSt.getOperand(2).getImm() * Scale.getKnownMinSize();
+ Offset = LdSt.getOperand(2).getImm() * Scale.getKnownMinValue();
} else {
assert(LdSt.getNumExplicitOperands() == 4 && "invalid number of operands");
BaseOp = &LdSt.getOperand(2);
- Offset = LdSt.getOperand(3).getImm() * Scale.getKnownMinSize();
+ Offset = LdSt.getOperand(3).getImm() * Scale.getKnownMinValue();
}
OffsetIsScalable = Scale.isScalable();
// Construct the complete offset.
bool IsMulVL = ScaleValue.isScalable();
- unsigned Scale = ScaleValue.getKnownMinSize();
+ unsigned Scale = ScaleValue.getKnownMinValue();
int64_t Offset = IsMulVL ? SOffset.getScalable() : SOffset.getFixed();
const MachineOperand &ImmOpnd =
MaxOff))
llvm_unreachable("unhandled opcode in isAArch64FrameOffsetLegal");
- Scale = ScaleValue.getKnownMinSize();
+ Scale = ScaleValue.getKnownMinValue();
assert(IsMulVL == ScaleValue.isScalable() &&
"Unscaled opcode has different value for scalable");
LLVMContext &Context = *DAG.getContext();
EVT ValueEltVT = ValueVT.getVectorElementType();
EVT PartEltVT = PartVT.getVectorElementType();
- unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
- unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
+ unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinValue();
+ unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinValue();
if (PartVTBitSize % ValueVTBitSize == 0) {
assert(PartVTBitSize >= ValueVTBitSize);
// If the element types are different, bitcast to the same element type of
SDValue Val = Parts[0];
EVT ValueEltVT = ValueVT.getVectorElementType();
EVT PartEltVT = PartVT.getVectorElementType();
- unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
- unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
+ unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinValue();
+ unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinValue();
if (PartVTBitSize % ValueVTBitSize == 0) {
assert(PartVTBitSize >= ValueVTBitSize);
EVT SameEltTypeVT = ValueVT;
for (auto *I : Params)
if (auto *VT = dyn_cast<llvm::VectorType>(I))
LargestVectorWidth = std::max(
- LargestVectorWidth, VT->getPrimitiveSizeInBits().getKnownMinSize());
+ LargestVectorWidth, VT->getPrimitiveSizeInBits().getKnownMinValue());
// Recompute the parameter attributes list based on the new arguments for
// the function.
uint64_t LargestVectorWidth = 0;
for (auto *I : NewArgumentTypes)
if (auto *VT = dyn_cast<llvm::VectorType>(I))
- LargestVectorWidth = std::max(
- LargestVectorWidth, VT->getPrimitiveSizeInBits().getKnownMinSize());
+ LargestVectorWidth =
+ std::max(LargestVectorWidth,
+ VT->getPrimitiveSizeInBits().getKnownMinValue());
FunctionType *OldFnTy = OldFn->getFunctionType();
Type *RetTy = OldFnTy->getReturnType();
unsigned Elts = VecTy->getElementCount().getKnownMinValue();
// For a fixed or scalable vector, get the size in bits of N x iM; for a
// scalar this is just M.
- unsigned SelEltSize = SelTy->getPrimitiveSizeInBits().getKnownMinSize();
+ unsigned SelEltSize = SelTy->getPrimitiveSizeInBits().getKnownMinValue();
Type *EltTy = Builder.getIntNTy(SelEltSize / Elts);
SelTy = VectorType::get(EltTy, VecTy->getElementCount());
}
if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return nullptr;
// The alloc and cast types should be either both fixed or both scalable.
- uint64_t AllocElTySize = DL.getTypeAllocSize(AllocElTy).getKnownMinSize();
- uint64_t CastElTySize = DL.getTypeAllocSize(CastElTy).getKnownMinSize();
+ uint64_t AllocElTySize = DL.getTypeAllocSize(AllocElTy).getKnownMinValue();
+ uint64_t CastElTySize = DL.getTypeAllocSize(CastElTy).getKnownMinValue();
if (CastElTySize == 0 || AllocElTySize == 0) return nullptr;
// If the allocation has multiple uses, only promote it if we're not
// shrinking the amount of memory being allocated.
- uint64_t AllocElTyStoreSize = DL.getTypeStoreSize(AllocElTy).getKnownMinSize();
- uint64_t CastElTyStoreSize = DL.getTypeStoreSize(CastElTy).getKnownMinSize();
+ uint64_t AllocElTyStoreSize =
+ DL.getTypeStoreSize(AllocElTy).getKnownMinValue();
+ uint64_t CastElTyStoreSize = DL.getTypeStoreSize(CastElTy).getKnownMinValue();
if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return nullptr;
// See if we can satisfy the modulus by pulling a scale out of the array
// Move all alloca's of zero byte objects to the entry block and merge them
// together. Note that we only do this for alloca's, because malloc should
// allocate and return a unique pointer, even for a zero byte allocation.
- if (DL.getTypeAllocSize(AI.getAllocatedType()).getKnownMinSize() == 0) {
+ if (DL.getTypeAllocSize(AI.getAllocatedType()).getKnownMinValue() == 0) {
// For a zero sized alloca there is no point in doing an array allocation.
// This is helpful if the array size is a complicated expression not used
// elsewhere.
AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
DL.getTypeAllocSize(EntryAI->getAllocatedType())
- .getKnownMinSize() != 0) {
+ .getKnownMinValue() != 0) {
AI.moveBefore(FirstInst);
return &AI;
}
BasePtrOffset.isNonNegative()) {
APInt AllocSize(
IdxWidth,
- DL.getTypeAllocSize(AI->getAllocatedType()).getKnownMinSize());
+ DL.getTypeAllocSize(AI->getAllocatedType()).getKnownMinValue());
if (BasePtrOffset.ule(AllocSize)) {
return GetElementPtrInst::CreateInBounds(
GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
unsigned DerefSize = MemInst->getModule()
->getDataLayout()
.getTypeStoreSize(AccType)
- .getKnownMinSize();
+ .getKnownMinValue();
if (DerefSize != 0) {
addKnowledge({Attribute::Dereferenceable, DerefSize, Pointer});
if (!NullPointerIsDefined(MemInst->getFunction(),
// Ensure MaxVF is a power of 2; the dependence distance bound may not be.
// Note that both WidestRegister and WidestType may not be a powers of 2.
auto MaxVectorElementCount = ElementCount::get(
- PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
+ PowerOf2Floor(WidestRegister.getKnownMinValue() / WidestType),
ComputeScalableMaxVF);
MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
if (MaximizeBandwidth || (MaximizeBandwidth.getNumOccurrences() == 0 &&
TTI.shouldMaximizeVectorBandwidth(RegKind))) {
auto MaxVectorElementCountMaxBW = ElementCount::get(
- PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
+ PowerOf2Floor(WidestRegister.getKnownMinValue() / SmallestType),
ComputeScalableMaxVF);
MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
EXPECT_EQ(nxv4i32.getSizeInBits(), nxv2i64.getSizeInBits());
EXPECT_EQ(nxv2f64.getSizeInBits(), nxv2i64.getSizeInBits());
EXPECT_NE(nxv2i32.getSizeInBits(), nxv4i32.getSizeInBits());
- EXPECT_LT(nxv2i32.getSizeInBits().getKnownMinSize(),
- nxv2i64.getSizeInBits().getKnownMinSize());
- EXPECT_LE(nxv4i32.getSizeInBits().getKnownMinSize(),
- nxv2i64.getSizeInBits().getKnownMinSize());
- EXPECT_GT(nxv4i32.getSizeInBits().getKnownMinSize(),
- nxv2i32.getSizeInBits().getKnownMinSize());
- EXPECT_GE(nxv2i64.getSizeInBits().getKnownMinSize(),
- nxv4i32.getSizeInBits().getKnownMinSize());
+ EXPECT_LT(nxv2i32.getSizeInBits().getKnownMinValue(),
+ nxv2i64.getSizeInBits().getKnownMinValue());
+ EXPECT_LE(nxv4i32.getSizeInBits().getKnownMinValue(),
+ nxv2i64.getSizeInBits().getKnownMinValue());
+ EXPECT_GT(nxv4i32.getSizeInBits().getKnownMinValue(),
+ nxv2i32.getSizeInBits().getKnownMinValue());
+ EXPECT_GE(nxv2i64.getSizeInBits().getKnownMinValue(),
+ nxv4i32.getSizeInBits().getKnownMinValue());
// Check equivalence and ordering on fixed types.
EXPECT_EQ(v4i32.getSizeInBits(), v2i64.getSizeInBits());
// Check that we can query the known minimum size for both scalable and
// fixed length types.
- EXPECT_EQ(nxv2i32.getSizeInBits().getKnownMinSize(), 64U);
- EXPECT_EQ(nxv2f64.getSizeInBits().getKnownMinSize(), 128U);
- EXPECT_EQ(v2i32.getSizeInBits().getKnownMinSize(),
- nxv2i32.getSizeInBits().getKnownMinSize());
+ EXPECT_EQ(nxv2i32.getSizeInBits().getKnownMinValue(), 64U);
+ EXPECT_EQ(nxv2f64.getSizeInBits().getKnownMinValue(), 128U);
+ EXPECT_EQ(v2i32.getSizeInBits().getKnownMinValue(),
+ nxv2i32.getSizeInBits().getKnownMinValue());
// Check scalable property.
ASSERT_FALSE(v4i32.getSizeInBits().isScalable());
auto *V2Int64Ty = FixedVectorType::get(Int64Ty, 2);
TypeSize V2I32Len = V2Int32Ty->getPrimitiveSizeInBits();
- EXPECT_EQ(V2I32Len.getKnownMinSize(), 64U);
+ EXPECT_EQ(V2I32Len.getKnownMinValue(), 64U);
EXPECT_FALSE(V2I32Len.isScalable());
EXPECT_LT(V2Int32Ty->getPrimitiveSizeInBits().getFixedSize(),
auto *ScV2Int64Ty = ScalableVectorType::get(Int64Ty, 2);
TypeSize ScV2I32Len = ScV2Int32Ty->getPrimitiveSizeInBits();
- EXPECT_EQ(ScV2I32Len.getKnownMinSize(), 64U);
+ EXPECT_EQ(ScV2I32Len.getKnownMinValue(), 64U);
EXPECT_TRUE(ScV2I32Len.isScalable());
- EXPECT_LT(ScV2Int32Ty->getPrimitiveSizeInBits().getKnownMinSize(),
- ScV4Int32Ty->getPrimitiveSizeInBits().getKnownMinSize());
- EXPECT_GT(ScV2Int64Ty->getPrimitiveSizeInBits().getKnownMinSize(),
- ScV2Int32Ty->getPrimitiveSizeInBits().getKnownMinSize());
- EXPECT_EQ(ScV4Int32Ty->getPrimitiveSizeInBits().getKnownMinSize(),
- ScV2Int64Ty->getPrimitiveSizeInBits().getKnownMinSize());
- EXPECT_NE(ScV2Int32Ty->getPrimitiveSizeInBits().getKnownMinSize(),
- ScV2Int64Ty->getPrimitiveSizeInBits().getKnownMinSize());
+ EXPECT_LT(ScV2Int32Ty->getPrimitiveSizeInBits().getKnownMinValue(),
+ ScV4Int32Ty->getPrimitiveSizeInBits().getKnownMinValue());
+ EXPECT_GT(ScV2Int64Ty->getPrimitiveSizeInBits().getKnownMinValue(),
+ ScV2Int32Ty->getPrimitiveSizeInBits().getKnownMinValue());
+ EXPECT_EQ(ScV4Int32Ty->getPrimitiveSizeInBits().getKnownMinValue(),
+ ScV2Int64Ty->getPrimitiveSizeInBits().getKnownMinValue());
+ EXPECT_NE(ScV2Int32Ty->getPrimitiveSizeInBits().getKnownMinValue(),
+ ScV2Int64Ty->getPrimitiveSizeInBits().getKnownMinValue());
// Check the DataLayout interfaces.
EXPECT_EQ(DL.getTypeSizeInBits(ScV2Int64Ty),
DL.getTypeSizeInBits(ScV4Int32Ty));
- EXPECT_EQ(DL.getTypeSizeInBits(ScV2Int32Ty).getKnownMinSize(), 64U);
+ EXPECT_EQ(DL.getTypeSizeInBits(ScV2Int32Ty).getKnownMinValue(), 64U);
EXPECT_EQ(DL.getTypeStoreSize(ScV2Int64Ty), DL.getTypeStoreSize(ScV4Int32Ty));
EXPECT_NE(DL.getTypeStoreSizeInBits(ScV2Int32Ty),
DL.getTypeStoreSizeInBits(ScV2Int64Ty));
- EXPECT_EQ(DL.getTypeStoreSizeInBits(ScV2Int32Ty).getKnownMinSize(), 64U);
- EXPECT_EQ(DL.getTypeStoreSize(ScV2Int64Ty).getKnownMinSize(), 16U);
+ EXPECT_EQ(DL.getTypeStoreSizeInBits(ScV2Int32Ty).getKnownMinValue(), 64U);
+ EXPECT_EQ(DL.getTypeStoreSize(ScV2Int64Ty).getKnownMinValue(), 16U);
EXPECT_EQ(DL.getTypeAllocSize(ScV4Int32Ty), DL.getTypeAllocSize(ScV2Int64Ty));
EXPECT_NE(DL.getTypeAllocSizeInBits(ScV2Int32Ty),
DL.getTypeAllocSizeInBits(ScV2Int64Ty));
- EXPECT_EQ(DL.getTypeAllocSizeInBits(ScV4Int32Ty).getKnownMinSize(), 128U);
- EXPECT_EQ(DL.getTypeAllocSize(ScV2Int32Ty).getKnownMinSize(), 8U);
+ EXPECT_EQ(DL.getTypeAllocSizeInBits(ScV4Int32Ty).getKnownMinValue(), 128U);
+ EXPECT_EQ(DL.getTypeAllocSize(ScV2Int32Ty).getKnownMinValue(), 8U);
ASSERT_TRUE(DL.typeSizeEqualsStoreSize(ScV4Int32Ty));
}
EXPECT_NE(V4Int32Ty->getPrimitiveSizeInBits(),
ScV4Int32Ty->getPrimitiveSizeInBits());
// If we are only checking the minimum, then they are the same size.
- EXPECT_EQ(V4Int32Ty->getPrimitiveSizeInBits().getKnownMinSize(),
- ScV4Int32Ty->getPrimitiveSizeInBits().getKnownMinSize());
+ EXPECT_EQ(V4Int32Ty->getPrimitiveSizeInBits().getKnownMinValue(),
+ ScV4Int32Ty->getPrimitiveSizeInBits().getKnownMinValue());
// We can't use ordering comparisons (<,<=,>,>=) between scalable and
// non-scalable vector sizes.
static_assert(TSFixed0.getFixedSize() == 0);
static_assert(TSFixed1.getFixedSize() == 1);
static_assert(TSFixed32.getFixedSize() == 32);
-static_assert(TSFixed32.getKnownMinSize() == 32);
+static_assert(TSFixed32.getKnownMinValue() == 32);
-static_assert(TypeSize::Scalable(32).getKnownMinSize() == 32);
+static_assert(TypeSize::Scalable(32).getKnownMinValue() == 32);
static_assert(TSFixed32 * 2 == TypeSize::Fixed(64));
static_assert(TSFixed32 * 2u == TypeSize::Fixed(64));