assert(!EC.isScalar() && "invalid number of vector elements");
assert(!ScalarTy.isVector() && "invalid vector element type");
return LLT{ScalarTy.isPointer(), /*isVector=*/true, EC,
- ScalarTy.getSizeInBits(),
+ ScalarTy.getSizeInBits().getFixedSize(),
ScalarTy.isPointer() ? ScalarTy.getAddressSpace() : 0};
}
return EC.isScalar() ? ScalarTy : LLT::vector(EC, ScalarTy);
}
- static LLT scalarOrVector(ElementCount EC, unsigned ScalarSize) {
- return scalarOrVector(EC, LLT::scalar(ScalarSize));
+ static LLT scalarOrVector(ElementCount EC, uint64_t ScalarSize) {
+ assert(ScalarSize <= std::numeric_limits<unsigned>::max() &&
+ "Not enough bits in LLT to represent size");
+ return scalarOrVector(EC, LLT::scalar(static_cast<unsigned>(ScalarSize)));
}
explicit LLT(bool isPointer, bool isVector, ElementCount EC,
- unsigned SizeInBits, unsigned AddressSpace) {
+ uint64_t SizeInBits, unsigned AddressSpace) {
init(isPointer, isVector, EC, SizeInBits, AddressSpace);
}
/// Default-construct an invalid (empty) LLT: all flag and data fields cleared.
explicit LLT() : IsPointer(false), IsVector(false), RawData(0) {}
}
/// Returns the total size of the type. Must only be called on sized types.
- unsigned getSizeInBits() const {
+ TypeSize getSizeInBits() const {
if (isPointer() || isScalar())
- return getScalarSizeInBits();
- // FIXME: This should return a TypeSize in order to work for scalable
- // vectors.
- return getScalarSizeInBits() * getElementCount().getKnownMinValue();
+ return TypeSize::Fixed(getScalarSizeInBits());
+ auto EC = getElementCount();
+ return TypeSize(getScalarSizeInBits() * EC.getKnownMinValue(),
+ EC.isScalable());
}
/// Returns the total size of the type in bytes, i.e. number of whole bytes
/// needed to represent the size in bits. Must only be called on sized types.
- unsigned getSizeInBytes() const {
- return (getSizeInBits() + 7) / 8;
+ TypeSize getSizeInBytes() const {
+ TypeSize BaseSize = getSizeInBits();
+ return {(BaseSize.getKnownMinSize() + 7) / 8, BaseSize.isScalable()};
}
LLT getScalarType() const {
getElementType());
}
- assert(getSizeInBits() % Factor == 0);
- return scalar(getSizeInBits() / Factor);
+ assert(getScalarSizeInBits() % Factor == 0);
+ return scalar(getScalarSizeInBits() / Factor);
}
- bool isByteSized() const { return (getSizeInBits() & 7) == 0; }
+ bool isByteSized() const { return getSizeInBits().isKnownMultipleOf(8); }
/// Extract the scalar size-in-bits field from the packed RawData word.
unsigned getScalarSizeInBits() const {
  assert(RawData != 0 && "Invalid Type");
  // Shift the field down to bit 0, then mask off the bits that belong to it.
  const auto Shifted = RawData >> FieldInfo[1];
  return Shifted & getMask(FieldInfo);
}
- void init(bool IsPointer, bool IsVector, ElementCount EC, unsigned SizeInBits,
+ void init(bool IsPointer, bool IsVector, ElementCount EC, uint64_t SizeInBits,
unsigned AddressSpace) {
+ assert(SizeInBits <= std::numeric_limits<unsigned>::max() &&
+ "Not enough bits in LLT to represent size");
this->IsPointer = IsPointer;
this->IsVector = IsVector;
if (!IsVector) {
DstOps[0].getLLTTy(*getMRI());
}) &&
"type mismatch in output list");
- assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
+ assert((TypeSize::ScalarTy)DstOps.size() *
+ DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
"input operands do not cover output register");
break;
SrcOps[0].getLLTTy(*getMRI());
}) &&
"type mismatch in input list");
- assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
+ assert((TypeSize::ScalarTy)SrcOps.size() *
+ SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
"input operands do not cover output register");
if (SrcOps.size() == 1)
SrcOps[0].getLLTTy(*getMRI());
}) &&
"type mismatch in input list");
- assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
+ assert((TypeSize::ScalarTy)SrcOps.size() *
+ SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
"input scalars do not exactly cover the output vector register");
break;
SrcOps[0].getLLTTy(*getMRI()));
}) &&
"type mismatch in input list");
- assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
+ assert((TypeSize::ScalarTy)SrcOps.size() *
+ SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
"input vectors do not exactly cover the output vector register");
break;
assert((!Ty.isVector() || Ty.isScalable() == Other.Ty.isScalable()) &&
"Unexpected mismatch of scalable property");
- return Ty.getSizeInBits() < Other.Ty.getSizeInBits();
+ return Ty.isVector()
+ ? std::make_tuple(Ty.isScalable(),
+ Ty.getSizeInBits().getKnownMinSize()) <
+ std::make_tuple(Other.Ty.isScalable(),
+ Other.Ty.getSizeInBits().getKnownMinSize())
+ : Ty.getSizeInBits().getFixedSize() <
+ Other.Ty.getSizeInBits().getFixedSize();
}
bool operator==(const LLTCodeGen &B) const { return Ty == B.Ty; }
return None;
// Align so unusual types like i1 don't get rounded down.
- return llvm::alignTo(MemTyOrNone->get().getSizeInBits(), 8);
+ return llvm::alignTo(
+ static_cast<unsigned>(MemTyOrNone->get().getSizeInBits()), 8);
}
Expected<InstructionMatcher &> GlobalISelEmitter::addBuiltinPredicates(