// the exact size. If the type is a scalable vector, it will represent the known
// minimum size.
class TypeSize : public details::FixedOrScalableQuantity<TypeSize, uint64_t> {
- using UP = details::FixedOrScalableQuantity<TypeSize, uint64_t>;
-
TypeSize(const FixedOrScalableQuantity<TypeSize, uint64_t> &V)
: FixedOrScalableQuantity(V) {}
- // Make 'getFixedValue' private, it is exposed as 'getFixedSize' below.
- using UP::getFixedValue;
- // Make 'getKnownMinValue' private, it is exposed as 'getKnownMinSize' below.
- using UP::getKnownMinValue;
-
public:
constexpr TypeSize(ScalarTy Quantity, bool Scalable)
: FixedOrScalableQuantity(Quantity, Scalable) {}
/// Similar to the alignTo functions in MathExtras.h
inline constexpr TypeSize alignTo(TypeSize Size, uint64_t Align) {
assert(Align != 0u && "Align must be non-zero");
- return {(Size.getKnownMinSize() + Align - 1) / Align * Align,
+ return {(Size.getKnownMinValue() + Align - 1) / Align * Align,
Size.isScalable()};
}
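// Illustrative sketch (not part of the patch): with the renamed accessors,
// fixed and scalable quantities read back as follows; the values are made up
// for demonstration only.
//   TypeSize FS = TypeSize::Fixed(20);     // exact size 20
//   TypeSize SS = TypeSize::Scalable(20);  // vscale x 20, known minimum 20
//   FS.getFixedValue();      // 20; would assert if called on SS
//   SS.getKnownMinValue();   // 20
//   alignTo(SS, 8);          // vscale x 24: rounds the minimum up, keeps scalability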
return nullptr;
// If we're not accessing anything in this constant, the result is undefined.
- if (Offset >= (int64_t)InitializerSize.getFixedSize())
+ if (Offset >= (int64_t)InitializerSize.getFixedValue())
return PoisonValue::get(IntType);
unsigned char RawBytes[32] = {0};
TypeSize TySize = DL.getTypeStoreSize(Ty);
if (TySize.isScalable())
return false;
- APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedSize());
+ APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
TLI);
}
// Given an array type, recursively traverse the elements.
if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
Type *EltTy = ATy->getElementType();
- uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedSize();
+ uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
StartingOffset + i * EltSize);
// Given an array type, recursively traverse the elements.
if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
Type *EltTy = ATy->getElementType();
- uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedSize();
+ uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
StartingOffset + i * EltSize);
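// Worked example (illustrative, not from the patch): for [4 x i32] with a
// typical data layout, EltSize is 4 bytes, so both recursions above record
// element offsets StartingOffset + {0, 4, 8, 12}.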
AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
DAG.getVScale(dl, IntPtr,
APInt(IntPtr.getScalarSizeInBits(),
- TySize.getKnownMinSize())));
+ TySize.getKnownMinValue())));
else
AllocSize =
DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
- DAG.getConstant(TySize.getFixedSize(), dl, IntPtr));
+ DAG.getConstant(TySize.getFixedValue(), dl, IntPtr));
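// Illustrative example (not part of the patch): for `alloca <vscale x 4 x i32>, i64 %n`,
// TySize is vscale x 16 bytes, so %n is multiplied by a VSCALE node of 16; for a
// fixed `[4 x i32]` the multiplier is simply the constant 16.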
// Handle alignment. If the requested alignment is less than or equal to
// the stack alignment, ignore it. If the size is greater than or equal to
// We can't subtract a fixed size from a scalable one, so in that case
// assume the scalable value is of minimum size.
TypeSize NewAllocSize =
- TypeSize::Fixed(AllocSize.getKnownMinSize()) - OffsetSize;
+ TypeSize::Fixed(AllocSize.getKnownMinValue()) - OffsetSize;
if (HasAddressTaken(I, NewAllocSize))
return true;
break;
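// Illustrative example (not part of the patch): for an alloca of
// <vscale x 4 x i32> (alloc size vscale x 16 bytes) and an 8-byte GEP offset,
// the code above conservatively computes TypeSize::Fixed(16) - TypeSize::Fixed(8),
// i.e. it treats the scalable allocation as its known minimum of 16 bytes.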
getMemberOffsets()[i] = StructSize;
// Consume space for this data item
- StructSize += DL.getTypeAllocSize(Ty).getFixedSize();
+ StructSize += DL.getTypeAllocSize(Ty).getFixedValue();
}
// Add padding to the end of the struct so that it could be put in an array
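// Worked example (illustrative): for a non-packed struct { i8, i32 } with a
// typical data layout, the loop above records member offsets 0 and 4 (the
// alignment padding between members is handled by code not shown here), and
// StructSize ends up as 8 bytes.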
unsigned NumSrcElts = VTSize / EltVT.getFixedSizeInBits();
EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);
- if (SrcVTSize.getFixedSize() < VTSize) {
+ if (SrcVTSize.getFixedValue() < VTSize) {
assert(2 * SrcVTSize == VTSize);
// We can pad out the smaller vector for free, so if it's part of a
// shuffle...
continue;
}
- if (SrcVTSize.getFixedSize() != 2 * VTSize) {
+ if (SrcVTSize.getFixedValue() != 2 * VTSize) {
LLVM_DEBUG(
dbgs() << "Reshuffle failed: result vector too small to extract\n");
return SDValue();
TypeSize TS = VT.getSizeInBits();
// TODO: We should be able to use bic/bif too for SVE.
- return !TS.isScalable() && TS.getFixedSize() >= 64; // vector 'bic'
+ return !TS.isScalable() && TS.getFixedValue() >= 64; // vector 'bic'
}
bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
continue;
}
CandidateTy Candidate(GV, K.second.size(),
- DL.getTypeAllocSize(GV->getValueType()).getFixedSize());
+ DL.getTypeAllocSize(GV->getValueType()).getFixedValue());
if (MostUsed < Candidate)
MostUsed = Candidate;
}
auto *NcTy = const_cast<Type *>(Ty);
switch (Kind) {
case Store:
- return DL.getTypeStoreSize(NcTy).getFixedSize();
+ return DL.getTypeStoreSize(NcTy).getFixedValue();
case Alloc:
- return DL.getTypeAllocSize(NcTy).getFixedSize();
+ return DL.getTypeAllocSize(NcTy).getFixedValue();
}
llvm_unreachable("Unhandled SizeKind enum");
}
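// Illustrative note (not part of the patch): the two kinds can differ, e.g.
// for x86_fp80 on a typical x86-64 data layout getTypeStoreSize() is 10 bytes
// while getTypeAllocSize() is padded up to the 16-byte ABI alignment, which
// is why the two SizeKinds are kept separate here.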
if (VT.getVectorMinNumElements() < MinElts)
return;
- unsigned Size = VT.getSizeInBits().getKnownMinSize();
+ unsigned Size = VT.getSizeInBits().getKnownMinValue();
const TargetRegisterClass *RC;
if (Size <= RISCV::RVVBitsPerBlock)
RC = &RISCV::VRRegClass;
RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
assert(VT.isScalableVector() && "Expecting a scalable vector type");
- unsigned KnownSize = VT.getSizeInBits().getKnownMinSize();
+ unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
if (VT.getVectorElementType() == MVT::i1)
KnownSize *= 8;
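// Illustrative example (not from the patch): MVT::nxv4i32 has a known minimum
// size of 128 bits, which with RVVBitsPerBlock == 64 corresponds to LMUL 2;
// i1 vectors are scaled by 8 first so mask types fall into the same buckets
// as byte-element vectors with the same element count.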
// Optimize for constant AVL
if (isa<ConstantSDNode>(AVL)) {
unsigned EltSize = VT.getScalarSizeInBits();
- unsigned MinSize = VT.getSizeInBits().getKnownMinSize();
+ unsigned MinSize = VT.getSizeInBits().getKnownMinValue();
unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
unsigned MaxVLMAX =
return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Op2);
}
unsigned EltSize = VecVT.getScalarSizeInBits();
- unsigned MinSize = VecVT.getSizeInBits().getKnownMinSize();
+ unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
unsigned MaxVLMAX =
RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
unsigned RISCVTTIImpl::getEstimatedVLFor(VectorType *Ty) {
if (isa<ScalableVectorType>(Ty)) {
const unsigned EltSize = DL.getTypeSizeInBits(Ty->getElementType());
- const unsigned MinSize = DL.getTypeSizeInBits(Ty).getKnownMinSize();
+ const unsigned MinSize = DL.getTypeSizeInBits(Ty).getKnownMinValue();
const unsigned VectorBits = *getVScaleForTuning() * RISCV::RVVBitsPerBlock;
return RISCVTargetLowering::computeVLMAX(VectorBits, EltSize, MinSize);
}
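// Worked example (illustrative, hypothetical tuning value): for
// <vscale x 4 x i32> with a tuning vscale of 2, VectorBits = 2 * 64 = 128,
// EltSize = 32 and MinSize = 128, so the estimated VL is
// LMUL * VLEN / SEW = 2 * 128 / 32 = 8 lanes.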
TypeSize Size = DL.getTypeSizeInBits(Ty);
if (Ty->isVectorTy()) {
if (Size.isScalable() && ST->hasVInstructions())
- return divideCeil(Size.getKnownMinSize(), RISCV::RVVBitsPerBlock);
+ return divideCeil(Size.getKnownMinValue(), RISCV::RVVBitsPerBlock);
if (ST->useRVVForFixedLengthVectors())
return divideCeil(Size, ST->getRealMinVLen());
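// Illustrative example: <vscale x 8 x i32> has a known minimum size of 256
// bits, so divideCeil(256, RVVBitsPerBlock) reports 4 vector registers
// (an LMUL=4 register group).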
if (!isAligned(I->getAlign(), Off))
return false;
- NeededDerefBytes = std::max(NeededDerefBytes, Off + Size.getFixedSize());
+ NeededDerefBytes = std::max(NeededDerefBytes, Off + Size.getFixedValue());
NeededAlign = std::max(NeededAlign, I->getAlign());
}
namespace {
struct TypeSizeComparator {
bool operator()(const TypeSize &LHS, const TypeSize &RHS) const {
- return std::make_tuple(LHS.isScalable(), LHS.getKnownMinSize()) <
- std::make_tuple(RHS.isScalable(), RHS.getKnownMinSize());
+ return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
+ std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
}
};
} // end anonymous namespace
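// Usage note (illustrative): the comparator sorts all fixed sizes before all
// scalable ones and then by known minimum value, so e.g.
// TypeSize::Fixed(128) < TypeSize::Scalable(16) < TypeSize::Scalable(128)
// under this ordering, giving a deterministic order for mixed sets.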