From 0e09d18c6a0a73dc9ed9e45584407a7a44fbbc4a Mon Sep 17 00:00:00 2001
From: Sander de Smalen
Date: Sun, 27 Jun 2021 16:07:19 +0100
Subject: [PATCH] Reland [GlobalISel] NFC: Have LLT::getSizeInBits/Bytes return a TypeSize.

This patch relands https://reviews.llvm.org/D104454, but fixes some
failing builds on Mac OS which apparently has a different definition
for size_t, that caused 'ambiguous operator overload' for the implicit
conversion of TypeSize to a scalar value.

This reverts commit b732e6c9a8438e5204ac96c8ca76f9b11abf98ff.
---
 .../CodeGen/GlobalISel/InstructionSelectorImpl.h |  2 +-
 llvm/include/llvm/Support/LowLevelTypeImpl.h     | 35 ++++++++++++----------
 llvm/lib/CodeGen/GlobalISel/CallLowering.cpp     |  2 +-
 llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp     |  2 +-
 llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp | 12 +++++---
 llvm/unittests/CodeGen/LowLevelTypeTest.cpp      |  3 ++
 llvm/utils/TableGen/GlobalISelEmitter.cpp        | 11 +++++--
 7 files changed, 43 insertions(+), 24 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h b/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
index d981496..bc9f952 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
@@ -595,7 +595,7 @@ bool InstructionSelector::executeMatchTable(
     case GIM_CheckPointerToAny: {
       int64_t InsnID = MatchTable[CurrentIdx++];
       int64_t OpIdx = MatchTable[CurrentIdx++];
-      int64_t SizeInBits = MatchTable[CurrentIdx++];
+      uint64_t SizeInBits = MatchTable[CurrentIdx++];
 
       DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                       dbgs() << CurrentIdx << ": GIM_CheckPointerToAny(MIs["
diff --git a/llvm/include/llvm/Support/LowLevelTypeImpl.h b/llvm/include/llvm/Support/LowLevelTypeImpl.h
index 0660bcd8..379e371 100644
--- a/llvm/include/llvm/Support/LowLevelTypeImpl.h
+++ b/llvm/include/llvm/Support/LowLevelTypeImpl.h
@@ -67,7 +67,7 @@ public:
     assert(!EC.isScalar() && "invalid number of vector elements");
     assert(!ScalarTy.isVector() && "invalid vector element type");
     return LLT{ScalarTy.isPointer(), /*isVector=*/true, EC,
-               ScalarTy.getSizeInBits(),
+               ScalarTy.getSizeInBits().getFixedSize(),
                ScalarTy.isPointer() ? ScalarTy.getAddressSpace() : 0};
   }
 
@@ -100,12 +100,14 @@ public:
     return EC.isScalar() ? ScalarTy : LLT::vector(EC, ScalarTy);
   }
 
-  static LLT scalarOrVector(ElementCount EC, unsigned ScalarSize) {
-    return scalarOrVector(EC, LLT::scalar(ScalarSize));
+  static LLT scalarOrVector(ElementCount EC, uint64_t ScalarSize) {
+    assert(ScalarSize <= std::numeric_limits<unsigned>::max() &&
+           "Not enough bits in LLT to represent size");
+    return scalarOrVector(EC, LLT::scalar(static_cast<unsigned>(ScalarSize)));
   }
 
   explicit LLT(bool isPointer, bool isVector, ElementCount EC,
-              unsigned SizeInBits, unsigned AddressSpace) {
+              uint64_t SizeInBits, unsigned AddressSpace) {
     init(isPointer, isVector, EC, SizeInBits, AddressSpace);
   }
   explicit LLT() : IsPointer(false), IsVector(false), RawData(0) {}
@@ -148,18 +150,19 @@ public:
   }
 
   /// Returns the total size of the type. Must only be called on sized types.
-  unsigned getSizeInBits() const {
+  TypeSize getSizeInBits() const {
     if (isPointer() || isScalar())
-      return getScalarSizeInBits();
-    // FIXME: This should return a TypeSize in order to work for scalable
-    // vectors.
-    return getScalarSizeInBits() * getElementCount().getKnownMinValue();
+      return TypeSize::Fixed(getScalarSizeInBits());
+    auto EC = getElementCount();
+    return TypeSize(getScalarSizeInBits() * EC.getKnownMinValue(),
+                    EC.isScalable());
   }
 
   /// Returns the total size of the type in bytes, i.e. number of whole bytes
   /// needed to represent the size in bits. Must only be called on sized types.
-  unsigned getSizeInBytes() const {
-    return (getSizeInBits() + 7) / 8;
+  TypeSize getSizeInBytes() const {
+    TypeSize BaseSize = getSizeInBits();
+    return {(BaseSize.getKnownMinSize() + 7) / 8, BaseSize.isScalable()};
   }
 
   LLT getScalarType() const {
@@ -199,11 +202,11 @@ public:
                             getElementType());
     }
 
-    assert(getSizeInBits() % Factor == 0);
-    return scalar(getSizeInBits() / Factor);
+    assert(getScalarSizeInBits() % Factor == 0);
+    return scalar(getScalarSizeInBits() / Factor);
   }
 
-  bool isByteSized() const { return (getSizeInBits() & 7) == 0; }
+  bool isByteSized() const { return getSizeInBits().isKnownMultipleOf(8); }
 
   unsigned getScalarSizeInBits() const {
     assert(RawData != 0 && "Invalid Type");
@@ -333,8 +336,10 @@ private:
     return getMask(FieldInfo) & (RawData >> FieldInfo[1]);
   }
 
-  void init(bool IsPointer, bool IsVector, ElementCount EC, unsigned SizeInBits,
+  void init(bool IsPointer, bool IsVector, ElementCount EC, uint64_t SizeInBits,
             unsigned AddressSpace) {
+    assert(SizeInBits <= std::numeric_limits<unsigned>::max() &&
+           "Not enough bits in LLT to represent size");
     this->IsPointer = IsPointer;
     this->IsVector = IsVector;
     if (!IsVector) {
diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
index 1f60340..2815dae 100644
--- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -355,7 +355,7 @@ static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
     assert(OrigRegs.size() == 1);
     LLT OrigTy = MRI.getType(OrigRegs[0]);
 
-    unsigned SrcSize = PartLLT.getSizeInBits() * Regs.size();
+    unsigned SrcSize = PartLLT.getSizeInBits().getFixedSize() * Regs.size();
     if (SrcSize == OrigTy.getSizeInBits())
       B.buildMerge(OrigRegs[0], Regs);
     else {
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index cbe4568..42c6384 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1565,7 +1565,7 @@ bool IRTranslator::translateMemFunc(const CallInst &CI,
     Register SrcReg = getOrCreateVReg(**AI);
     LLT SrcTy = MRI->getType(SrcReg);
     if (SrcTy.isPointer())
-      MinPtrSize = std::min(SrcTy.getSizeInBits(), MinPtrSize);
+      MinPtrSize = std::min<uint64_t>(SrcTy.getSizeInBits(), MinPtrSize);
     SrcRegs.push_back(SrcReg);
   }
 
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index e73a5f8..3352d19 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -1117,7 +1117,8 @@ MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                  DstOps[0].getLLTTy(*getMRI());
                         }) &&
            "type mismatch in output list");
-    assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
+    assert((TypeSize::ScalarTy)DstOps.size() *
+                   DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
     break;
@@ -1131,7 +1132,8 @@ MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                  SrcOps[0].getLLTTy(*getMRI());
                         }) &&
           "type mismatch in input list");
-    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
+    assert((TypeSize::ScalarTy)SrcOps.size() *
+                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
              DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
     if (SrcOps.size() == 1)
@@ -1182,7 +1184,8 @@ MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                  SrcOps[0].getLLTTy(*getMRI());
                         }) &&
           "type mismatch in input list");
-    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
+    assert((TypeSize::ScalarTy)SrcOps.size() *
+                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
              DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
     break;
@@ -1215,7 +1218,8 @@ MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                  SrcOps[0].getLLTTy(*getMRI()));
                         }) &&
           "type mismatch in input list");
-    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
+    assert((TypeSize::ScalarTy)SrcOps.size() *
+                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
              DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
     break;
diff --git a/llvm/unittests/CodeGen/LowLevelTypeTest.cpp b/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
index e27c6e4..9ff75b8 100644
--- a/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
+++ b/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
@@ -81,6 +81,9 @@ TEST(LowLevelTypeTest, Vector) {
       EXPECT_EQ(EC, VTy.getElementCount());
       if (!EC.isScalable())
         EXPECT_EQ(S * EC.getFixedValue(), VTy.getSizeInBits());
+      else
+        EXPECT_EQ(TypeSize::Scalable(S * EC.getKnownMinValue()),
+                  VTy.getSizeInBits());
 
       // Test equality operators.
       EXPECT_TRUE(VTy == VTy);
diff --git a/llvm/utils/TableGen/GlobalISelEmitter.cpp b/llvm/utils/TableGen/GlobalISelEmitter.cpp
index 497854a..6930736 100644
--- a/llvm/utils/TableGen/GlobalISelEmitter.cpp
+++ b/llvm/utils/TableGen/GlobalISelEmitter.cpp
@@ -182,7 +182,13 @@ public:
 
     assert((!Ty.isVector() || Ty.isScalable() == Other.Ty.isScalable()) &&
           "Unexpected mismatch of scalable property");
-    return Ty.getSizeInBits() < Other.Ty.getSizeInBits();
+    return Ty.isVector()
+               ? std::make_tuple(Ty.isScalable(),
+                                 Ty.getSizeInBits().getKnownMinSize()) <
+                     std::make_tuple(Other.Ty.isScalable(),
+                                     Other.Ty.getSizeInBits().getKnownMinSize())
+               : Ty.getSizeInBits().getFixedSize() <
+                     Other.Ty.getSizeInBits().getFixedSize();
   }
 
   bool operator==(const LLTCodeGen &B) const { return Ty == B.Ty; }
@@ -3788,7 +3794,8 @@ Optional<uint64_t> GlobalISelEmitter::getMemSizeBitsFromPredicate(const TreePred
     return None;
 
   // Align so unusual types like i1 don't get rounded down.
-  return llvm::alignTo(MemTyOrNone->get().getSizeInBits(), 8);
+  return llvm::alignTo(
+      static_cast<uint64_t>(MemTyOrNone->get().getSizeInBits()), 8);
 }
 
 Expected<InstructionMatcher &> GlobalISelEmitter::addBuiltinPredicates(
-- 
2.7.4
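
For readers skimming the diff, a small standalone sketch follows. It is not part of the patch and not code from the LLVM tree; it only shows how a caller adapts once LLT::getSizeInBits() returns TypeSize instead of unsigned, using the accessors the patch itself relies on (getFixedSize, getKnownMinSize, isScalable). The helper names below are illustrative assumptions.

// Sketch only: helper names are hypothetical; the LLT/TypeSize accessors are
// the ones exercised by the patch above.
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/TypeSize.h"
#include <cassert>
#include <cstdint>

using namespace llvm;

// Whole bytes needed for the known-minimum size; for a scalable vector the
// actual size is a runtime multiple of this value (mirrors getSizeInBytes()).
static uint64_t knownMinStoreBytes(LLT Ty) {
  TypeSize Bits = Ty.getSizeInBits(); // now a TypeSize, not a plain unsigned
  return (Bits.getKnownMinSize() + 7) / 8;
}

// Call sites that genuinely need a fixed width (as CallLowering.cpp and
// GlobalISelEmitter.cpp do above) convert explicitly rather than relying on
// the implicit TypeSize-to-integer conversion that triggered the ambiguous
// overload on Mac OS.
static uint64_t fixedBits(LLT Ty) {
  assert(!Ty.getSizeInBits().isScalable() && "caller expects a fixed size");
  return Ty.getSizeInBits().getFixedSize();
}

As a quick check: LLT::vector(ElementCount::getScalable(4), LLT::scalar(32)) reports a known-minimum size of 128 bits (knownMinStoreBytes == 16) with isScalable() true, while LLT::scalar(64) gives fixedBits == 64.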