From: Guillaume Chatelet
Date: Fri, 6 Jan 2023 15:27:50 +0000 (+0000)
Subject: Revert D141134 "[NFC] Only expose getXXXSize functions in TypeSize"
X-Git-Tag: upstream/17.0.6~21893
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=87b6b347fc915e9862cdcf84284ba1c4276a8018;p=platform%2Fupstream%2Fllvm.git

Revert D141134 "[NFC] Only expose getXXXSize functions in TypeSize"

The patch should be discussed further.

This reverts commit dd56e1c92b0e6e6be249f2d2dd40894e0417223f.
---

diff --git a/llvm/include/llvm/Support/TypeSize.h b/llvm/include/llvm/Support/TypeSize.h
index 7aae6f3..53c888e 100644
--- a/llvm/include/llvm/Support/TypeSize.h
+++ b/llvm/include/llvm/Support/TypeSize.h
@@ -311,16 +311,9 @@ public:
 // the exact size. If the type is a scalable vector, it will represent the known
 // minimum size.
 class TypeSize : public details::FixedOrScalableQuantity<TypeSize, uint64_t> {
-  using UP = details::FixedOrScalableQuantity<TypeSize, uint64_t>;
-  TypeSize(const FixedOrScalableQuantity<TypeSize, uint64_t> &V)
-      : FixedOrScalableQuantity(V) {}
-  // Make 'getFixedValue' private, it is exposed as 'getFixedSize' below.
-  using UP::getFixedValue;
-  // Make 'getKnownMinValue' private, it is exposed as 'getKnownMinSize' below.
-  using UP::getKnownMinValue;
-
 public:
   constexpr TypeSize(ScalarTy Quantity, bool Scalable)
       : FixedOrScalableQuantity(Quantity, Scalable) {}
@@ -406,7 +399,7 @@ public:
 /// Similar to the alignTo functions in MathExtras.h
 inline constexpr TypeSize alignTo(TypeSize Size, uint64_t Align) {
   assert(Align != 0u && "Align must be non-zero");
-  return {(Size.getKnownMinSize() + Align - 1) / Align * Align,
+  return {(Size.getKnownMinValue() + Align - 1) / Align * Align,
           Size.isScalable()};
 }
 
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index 6ad83af..7a601ad 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -599,7 +599,7 @@ Constant *FoldReinterpretLoadFromConst(Constant *C, Type *LoadTy,
     return nullptr;
 
   // If we're not accessing anything in this constant, the result is undefined.
-  if (Offset >= (int64_t)InitializerSize.getFixedSize())
+  if (Offset >= (int64_t)InitializerSize.getFixedValue())
     return PoisonValue::get(IntType);
 
   unsigned char RawBytes[32] = {0};
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index 325c8b2..e108df8 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -408,7 +408,7 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
   TypeSize TySize = DL.getTypeStoreSize(Ty);
   if (TySize.isScalable())
     return false;
-  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedSize());
+  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
   return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
                                      TLI);
 }
diff --git a/llvm/lib/CodeGen/Analysis.cpp b/llvm/lib/CodeGen/Analysis.cpp
index 5480629..f5dbacc 100644
--- a/llvm/lib/CodeGen/Analysis.cpp
+++ b/llvm/lib/CodeGen/Analysis.cpp
@@ -101,7 +101,7 @@ void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
   // Given an array type, recursively traverse the elements.
   if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
     Type *EltTy = ATy->getElementType();
-    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedSize();
+    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
       ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
                       StartingOffset + i * EltSize);
@@ -146,7 +146,7 @@ void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
   // Given an array type, recursively traverse the elements.
   if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
     Type *EltTy = ATy->getElementType();
-    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedSize();
+    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
       computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                        StartingOffset + i * EltSize);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 0a0acd3..4d44e96 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4062,11 +4062,11 @@ void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
     AllocSize =
         DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
                     DAG.getVScale(dl, IntPtr,
                                   APInt(IntPtr.getScalarSizeInBits(),
-                                        TySize.getKnownMinSize())));
+                                        TySize.getKnownMinValue())));
   else
     AllocSize =
         DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
-                    DAG.getConstant(TySize.getFixedSize(), dl, IntPtr));
+                    DAG.getConstant(TySize.getFixedValue(), dl, IntPtr));
 
   // Handle alignment.  If the requested alignment is less than or equal to
   // the stack alignment, ignore it.  If the size is greater than or equal to
diff --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp
index 0b16f4f..f76877f 100644
--- a/llvm/lib/CodeGen/StackProtector.cpp
+++ b/llvm/lib/CodeGen/StackProtector.cpp
@@ -218,7 +218,7 @@ bool StackProtector::HasAddressTaken(const Instruction *AI,
       // We can't subtract a fixed size from a scalable one, so in that case
       // assume the scalable value is of minimum size.
       TypeSize NewAllocSize =
-          TypeSize::Fixed(AllocSize.getKnownMinSize()) - OffsetSize;
+          TypeSize::Fixed(AllocSize.getKnownMinValue()) - OffsetSize;
       if (HasAddressTaken(I, NewAllocSize))
         return true;
       break;
diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp
index 6fc3f66..fb331ee 100644
--- a/llvm/lib/IR/DataLayout.cpp
+++ b/llvm/lib/IR/DataLayout.cpp
@@ -67,7 +67,7 @@ StructLayout::StructLayout(StructType *ST, const DataLayout &DL) {
     getMemberOffsets()[i] = StructSize;
 
     // Consume space for this data item
-    StructSize += DL.getTypeAllocSize(Ty).getFixedSize();
+    StructSize += DL.getTypeAllocSize(Ty).getFixedValue();
   }
 
   // Add padding to the end of the struct so that it could be put in an array
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 51538bb..b417ce0 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -10468,7 +10468,7 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
     unsigned NumSrcElts = VTSize / EltVT.getFixedSizeInBits();
     EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);
 
-    if (SrcVTSize.getFixedSize() < VTSize) {
+    if (SrcVTSize.getFixedValue() < VTSize) {
       assert(2 * SrcVTSize == VTSize);
       // We can pad out the smaller vector for free, so if it's part of a
       // shuffle...
@@ -10478,7 +10478,7 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
       continue;
     }
 
-    if (SrcVTSize.getFixedSize() != 2 * VTSize) {
+    if (SrcVTSize.getFixedValue() != 2 * VTSize) {
       LLVM_DEBUG(
           dbgs() << "Reshuffle failed: result vector too small to extract\n");
       return SDValue();
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index d542fe8..e62a1bf 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -791,7 +791,7 @@ public:
     TypeSize TS = VT.getSizeInBits();
     // TODO: We should be able to use bic/bif too for SVE.
-    return !TS.isScalable() && TS.getFixedSize() >= 64; // vector 'bic'
+    return !TS.isScalable() && TS.getFixedValue() >= 64; // vector 'bic'
   }
 
   bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
index d2ea11f..11ba5c9 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
@@ -650,7 +650,7 @@ public:
         continue;
       }
       CandidateTy Candidate(GV, K.second.size(),
-                            DL.getTypeAllocSize(GV->getValueType()).getFixedSize());
+                            DL.getTypeAllocSize(GV->getValueType()).getFixedValue());
       if (MostUsed < Candidate)
         MostUsed = Candidate;
     }
diff --git a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
index c2eb11a..3ff6e9b 100644
--- a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
@@ -1816,9 +1816,9 @@ auto HexagonVectorCombine::getSizeOf(const Type *Ty, SizeKind Kind) const
   auto *NcTy = const_cast<Type *>(Ty);
   switch (Kind) {
   case Store:
-    return DL.getTypeStoreSize(NcTy).getFixedSize();
+    return DL.getTypeStoreSize(NcTy).getFixedValue();
   case Alloc:
-    return DL.getTypeAllocSize(NcTy).getFixedSize();
+    return DL.getTypeAllocSize(NcTy).getFixedValue();
   }
   llvm_unreachable("Unhandled SizeKind enum");
 }
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 753242a..22eebe1 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -138,7 +138,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       if (VT.getVectorMinNumElements() < MinElts)
         return;
 
-      unsigned Size = VT.getSizeInBits().getKnownMinSize();
+      unsigned Size = VT.getSizeInBits().getKnownMinValue();
       const TargetRegisterClass *RC;
       if (Size <= RISCV::RVVBitsPerBlock)
         RC = &RISCV::VRRegClass;
@@ -1589,7 +1589,7 @@ static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
 
 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
   assert(VT.isScalableVector() && "Expecting a scalable vector type");
-  unsigned KnownSize = VT.getSizeInBits().getKnownMinSize();
+  unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
   if (VT.getVectorElementType() == MVT::i1)
     KnownSize *= 8;
 
@@ -5443,7 +5443,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
   // Optimize for constant AVL
   if (isa<ConstantSDNode>(AVL)) {
     unsigned EltSize = VT.getScalarSizeInBits();
-    unsigned MinSize = VT.getSizeInBits().getKnownMinSize();
+    unsigned MinSize = VT.getSizeInBits().getKnownMinValue();
 
     unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
     unsigned MaxVLMAX =
@@ -6419,7 +6419,7 @@ SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Op2);
   }
   unsigned EltSize = VecVT.getScalarSizeInBits();
-  unsigned MinSize = VecVT.getSizeInBits().getKnownMinSize();
+  unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
   unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
   unsigned MaxVLMAX =
     RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 54dfe3f..02ce1b1 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -1008,7 +1008,7 @@ InstructionCost RISCVTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
 unsigned RISCVTTIImpl::getEstimatedVLFor(VectorType *Ty) {
   if (isa<ScalableVectorType>(Ty)) {
     const unsigned EltSize = DL.getTypeSizeInBits(Ty->getElementType());
-    const unsigned MinSize = DL.getTypeSizeInBits(Ty).getKnownMinSize();
+    const unsigned MinSize = DL.getTypeSizeInBits(Ty).getKnownMinValue();
     const unsigned VectorBits = *getVScaleForTuning() * RISCV::RVVBitsPerBlock;
     return RISCVTargetLowering::computeVLMAX(VectorBits, EltSize, MinSize);
   }
@@ -1472,7 +1472,7 @@ unsigned RISCVTTIImpl::getRegUsageForType(Type *Ty) {
   TypeSize Size = DL.getTypeSizeInBits(Ty);
   if (Ty->isVectorTy()) {
     if (Size.isScalable() && ST->hasVInstructions())
-      return divideCeil(Size.getKnownMinSize(), RISCV::RVVBitsPerBlock);
+      return divideCeil(Size.getKnownMinValue(), RISCV::RVVBitsPerBlock);
 
     if (ST->useRVVForFixedLengthVectors())
       return divideCeil(Size, ST->getRealMinVLen());
diff --git a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
index da9abdc..7dace08 100644
--- a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -543,7 +543,7 @@ static bool findArgParts(Argument *Arg, const DataLayout &DL, AAResults &AAR,
     if (!isAligned(I->getAlign(), Off))
       return false;
 
-    NeededDerefBytes = std::max(NeededDerefBytes, Off + Size.getFixedSize());
+    NeededDerefBytes = std::max(NeededDerefBytes, Off + Size.getFixedValue());
     NeededAlign = std::max(NeededAlign, I->getAlign());
   }
 
diff --git a/llvm/utils/TableGen/CodeGenDAGPatterns.cpp b/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
index 79f8577..bbe04fc 100644
--- a/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
+++ b/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
@@ -764,8 +764,8 @@ bool TypeInfer::EnforceSameNumElts(TypeSetByHwMode &V, TypeSetByHwMode &W) {
 namespace {
 struct TypeSizeComparator {
   bool operator()(const TypeSize &LHS, const TypeSize &RHS) const {
-    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinSize()) <
-           std::make_tuple(RHS.isScalable(), RHS.getKnownMinSize());
+    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
+           std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
   }
 };
 } // end anonymous namespace
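
Note (not part of the patch): for readers following the rename churn above, the sketch below illustrates the API surface this revert restores. After the revert, TypeSize again publicly exposes the getFixedValue/getKnownMinValue accessors inherited from FixedOrScalableQuantity alongside the older getFixedSize/getKnownMinSize spellings, which is why every call site in the diff simply swaps one name for the other. This is a simplified stand-in, not the real llvm/Support/TypeSize.h, and the printSize helper is hypothetical.

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    // Simplified stand-in for llvm::TypeSize: a quantity that is either an
    // exact size or a known minimum scaled by the runtime vector length.
    class TypeSize {
      uint64_t Quantity;
      bool Scalable;

    public:
      constexpr TypeSize(uint64_t Quantity, bool Scalable)
          : Quantity(Quantity), Scalable(Scalable) {}

      bool isScalable() const { return Scalable; }

      // Both spellings are public after the revert; the reverted patch had
      // hidden the *Value accessors behind the *Size synonyms.
      uint64_t getKnownMinValue() const { return Quantity; }
      uint64_t getFixedValue() const {
        assert(!Scalable && "exact size unknown for scalable quantity");
        return Quantity;
      }
      uint64_t getKnownMinSize() const { return getKnownMinValue(); }
      uint64_t getFixedSize() const { return getFixedValue(); }
    };

    // Hypothetical helper showing the usage pattern seen at the call sites.
    void printSize(const TypeSize &TS) {
      if (TS.isScalable())
        std::cout << "at least " << TS.getKnownMinValue() << " x vscale\n";
      else
        std::cout << "exactly " << TS.getFixedValue() << "\n";
    }

    int main() {
      printSize(TypeSize(128, /*Scalable=*/false)); // exactly 128
      printSize(TypeSize(64, /*Scalable=*/true));   // at least 64 x vscale
    }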