/// promotions or expansions.
/// Return true if the target natively supports the given value type: the
/// type must be simple and have a register class registered for it.
bool isTypeLegal(EVT VT) const {
  // Extended (non-simple) types are never directly legal.
  if (!VT.isSimple())
    return false;
  const unsigned Idx = (unsigned)VT.getSimpleVT().SimpleTy;
  assert(Idx < std::size(RegClassForVT));
  return RegClassForVT[Idx] != nullptr;
}
if (VT.isExtended()) return Expand;
// If a target-specific SDNode requires legalization, require the target
// to provide custom legalization for it.
- if (Op >= array_lengthof(OpActions[0])) return Custom;
+ if (Op >= std::size(OpActions[0]))
+ return Custom;
return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
}
/// expander for it.
LegalizeAction
getCondCodeAction(ISD::CondCode CC, MVT VT) const {
- assert((unsigned)CC < array_lengthof(CondCodeActions) &&
- ((unsigned)VT.SimpleTy >> 3) < array_lengthof(CondCodeActions[0]) &&
+ assert((unsigned)CC < std::size(CondCodeActions) &&
+ ((unsigned)VT.SimpleTy >> 3) < std::size(CondCodeActions[0]) &&
"Table isn't big enough!");
// See setCondCodeAction for how this is encoded.
uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
/// Return the type of registers that this ValueType will eventually require.
MVT getRegisterType(MVT VT) const {
  // Index the table by the simple value type enumerator.
  const unsigned Idx = (unsigned)VT.SimpleTy;
  assert(Idx < std::size(RegisterTypeForVT));
  return RegisterTypeForVT[Idx];
}
MVT getRegisterType(LLVMContext &Context, EVT VT) const {
if (VT.isSimple()) {
assert((unsigned)VT.getSimpleVT().SimpleTy <
- array_lengthof(RegisterTypeForVT));
+ std::size(RegisterTypeForVT));
return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
}
if (VT.isVector()) {
Optional<MVT> RegisterVT = None) const {
if (VT.isSimple()) {
assert((unsigned)VT.getSimpleVT().SimpleTy <
- array_lengthof(NumRegistersForVT));
+ std::size(NumRegistersForVT));
return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
}
if (VT.isVector()) {
/// If true, the target has custom DAG combine transformations that it can
/// perform for the specified node.
/// If true, the target has custom DAG combine transformations that it can
/// perform for the specified node.
bool hasTargetDAGCombine(ISD::NodeType NT) const {
  // One bit per node type, packed eight to an array element.
  const unsigned Word = NT >> 3;
  const unsigned BitMask = 1u << (NT & 7);
  assert(Word < std::size(TargetDAGCombineArray));
  return (TargetDAGCombineArray[Word] & BitMask) != 0;
}
/// specified value type. This indicates the selector can handle values of
/// that class natively.
/// Record that values of type VT live in register class RC, i.e. the selector
/// can handle such values natively.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
  const unsigned Idx = (unsigned)VT.SimpleTy;
  assert(Idx < std::size(RegClassForVT));
  RegClassForVT[Idx] = RC;
}
/// type and indicate what to do about it. Note that VT may refer to either
/// the type of a result or that of an operand of Op.
/// Record the legalize action to take for operation Op on type VT. VT may be
/// the type of either a result or an operand of Op.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action) {
  assert(Op < std::size(OpActions[0]) && "Table isn't big enough!");
  // Rows are indexed by simple value type, columns by opcode.
  auto &Row = OpActions[(unsigned)VT.SimpleTy];
  Row[Op] = Action;
}
void setOperationAction(ArrayRef<unsigned> Ops, MVT VT,
void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, MVT VT,
LegalizeAction Action) {
for (auto CC : CCs) {
- assert(VT.isValid() && (unsigned)CC < array_lengthof(CondCodeActions) &&
+ assert(VT.isValid() && (unsigned)CC < std::size(CondCodeActions) &&
"Table isn't big enough!");
assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
/// The lower 3 bits of the SimpleTy index into Nth 4bit set from the
/// PerformDAGCombine virtual method.
/// Mark each node type in NTs as having a target-specific DAG combine, so the
/// combiner will call the target's PerformDAGCombine hook for it.
void setTargetDAGCombine(ArrayRef<ISD::NodeType> NTs) {
  for (ISD::NodeType NT : NTs) {
    // Set the node's bit in the packed bitmap (8 node types per element).
    const unsigned Word = NT >> 3;
    assert(Word < std::size(TargetDAGCombineArray));
    TargetDAGCombineArray[Word] |= 1u << (NT & 7);
  }
}
// The input vector this mask element indexes into.
unsigned Input = (unsigned)Idx / NewElts;
- if (Input >= array_lengthof(Inputs)) {
+ if (Input >= std::size(Inputs)) {
// The mask element does not index into any input vector.
Ops.push_back(-1);
continue;
// Find or create a shuffle vector operand to hold this input.
unsigned OpNo;
- for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
+ for (OpNo = 0; OpNo < std::size(InputUsed); ++OpNo) {
if (InputUsed[OpNo] == Input) {
// This input vector is already an operand.
break;
}
}
- if (OpNo >= array_lengthof(InputUsed)) {
+ if (OpNo >= std::size(InputUsed)) {
// More than two input vectors used! Give up on trying to create a
// shuffle vector. Insert all elements into a BUILD_VECTOR instead.
UseBuildVector = true;
// The input vector this mask element indexes into.
unsigned Input = (unsigned)Idx / NewElts;
- if (Input >= array_lengthof(Inputs)) {
+ if (Input >= std::size(Inputs)) {
// The mask element is "undef" or indexes off the end of the input.
SVOps.push_back(MIRBuilder.buildUndef(EltTy).getReg(0));
continue;
&DL](SmallVectorImpl<int> &Mask) {
// Check if all inputs are shuffles of the same operands or non-shuffles.
MapVector<std::pair<SDValue, SDValue>, SmallVector<unsigned>> ShufflesIdxs;
- for (unsigned Idx = 0; Idx < array_lengthof(Inputs); ++Idx) {
+ for (unsigned Idx = 0; Idx < std::size(Inputs); ++Idx) {
SDValue Input = Inputs[Idx];
auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Input.getNode());
if (!Shuffle ||
ShufflesIdxs[std::make_pair(P.first.second, P.first.first)].clear();
}
// Check if any concat_vectors can be simplified.
- SmallBitVector UsedSubVector(2 * array_lengthof(Inputs));
+ SmallBitVector UsedSubVector(2 * std::size(Inputs));
for (int &Idx : Mask) {
if (Idx == UndefMaskElem)
continue;
}
if (UsedSubVector.count() > 1) {
SmallVector<SmallVector<std::pair<unsigned, int>, 2>> Pairs;
- for (unsigned I = 0; I < array_lengthof(Inputs); ++I) {
+ for (unsigned I = 0; I < std::size(Inputs); ++I) {
if (UsedSubVector.test(2 * I) == UsedSubVector.test(2 * I + 1))
continue;
if (Pairs.empty() || Pairs.back().size() == 2)
// Try to remove extra shuffles (except broadcasts) and shuffles with the
// reused operands.
Changed = false;
- for (unsigned I = 0; I < array_lengthof(Inputs); ++I) {
+ for (unsigned I = 0; I < std::size(Inputs); ++I) {
auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Inputs[I].getNode());
if (!Shuffle)
continue;
}
// Adjust mask in case of reused inputs. Also, need to insert constant
// inputs at first, otherwise it affects the final outcome.
- if (UniqueInputs.size() != array_lengthof(Inputs)) {
+ if (UniqueInputs.size() != std::size(Inputs)) {
auto &&UniqueVec = UniqueInputs.takeVector();
auto &&UniqueConstantVec = UniqueConstantInputs.takeVector();
unsigned ConstNum = UniqueConstantVec.size();
// Build a shuffle mask for the output, discovering on the fly which
// input vectors to use as shuffle operands.
unsigned FirstMaskIdx = High * NewElts;
- SmallVector<int> Mask(NewElts * array_lengthof(Inputs), UndefMaskElem);
+ SmallVector<int> Mask(NewElts * std::size(Inputs), UndefMaskElem);
copy(makeArrayRef(OrigMask).slice(FirstMaskIdx, NewElts), Mask.begin());
assert(!Output && "Expected default initialized initial value.");
TryPeekThroughShufflesInputs(Mask);
return SecondIteration;
};
processShuffleMasks(
- Mask, array_lengthof(Inputs), array_lengthof(Inputs),
+ Mask, std::size(Inputs), std::size(Inputs),
/*NumOfUsedRegs=*/1,
[&Output, &DAG = DAG, NewVT]() { Output = DAG.getUNDEF(NewVT); },
[&Output, &DAG = DAG, NewVT, &DL, &Inputs,
// If this is a simple type, use the ComputeRegisterProp mechanism.
if (VT.isSimple()) {
MVT SVT = VT.getSimpleVT();
- assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
+ assert((unsigned)SVT.SimpleTy < std::size(TransformToType));
MVT NVT = TransformToType[SVT.SimpleTy];
LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);
const MapVector<uint64_t, UnitIndexEntry> &IndexEntries,
uint32_t DWARFUnitIndex::Entry::SectionContribution::*Field) {
for (const auto &E : IndexEntries)
- for (size_t I = 0; I != array_lengthof(E.second.Contributions); ++I)
+ for (size_t I = 0; I != std::size(E.second.Contributions); ++I)
if (ContributionOffsets[I])
Out.emitIntValue(E.second.Contributions[I].*Field, 4);
}
} \
} while (false)
// Expands to a `return storeImpl(...)` that placement-news a CLASS node sized
// for the OPS operands and hands it, together with Storage, to the per-class
// container Context.pImpl->CLASS##s. (Hunk migrates array_lengthof ->
// std::size for the operand count passed to operator new.)
#define DEFINE_GETIMPL_STORE(CLASS, ARGS, OPS) \
- return storeImpl(new (array_lengthof(OPS), Storage) \
+ return storeImpl(new (std::size(OPS), Storage) \
CLASS(Context, Storage, UNWRAP_ARGS(ARGS), OPS), \
Storage, Context.pImpl->CLASS##s)
#define DEFINE_GETIMPL_STORE_NO_OPS(CLASS, ARGS) \
CLASS(Context, Storage, UNWRAP_ARGS(ARGS)), \
Storage, Context.pImpl->CLASS##s)
// Same as DEFINE_GETIMPL_STORE but for node classes whose constructor takes
// no unwrapped argument list — only (Context, Storage, OPS). The diff also
// re-flows the continuation onto one line after the std::size migration.
#define DEFINE_GETIMPL_STORE_NO_CONSTRUCTOR_ARGS(CLASS, OPS) \
- return storeImpl(new (array_lengthof(OPS), Storage) \
- CLASS(Context, Storage, OPS), \
+ return storeImpl(new (std::size(OPS), Storage) CLASS(Context, Storage, OPS), \
Storage, Context.pImpl->CLASS##s)
#define DEFINE_GETIMPL_STORE_N(CLASS, ARGS, OPS, NUM_OPS) \
return storeImpl(new (NUM_OPS, Storage) \
Macros,
SysRoot,
SDK};
- return storeImpl(new (array_lengthof(Ops), Storage) DICompileUnit(
+ return storeImpl(new (std::size(Ops), Storage) DICompileUnit(
Context, Storage, SourceLanguage, IsOptimized,
RuntimeVersion, EmissionKind, DWOId, SplitDebugInlining,
DebugInfoForProfiling, NameTableKind, RangesBaseAddress,
{"FK_SecRel_8", 0, 64, 0},
};
- assert((size_t)Kind <= array_lengthof(Builtins) && "Unknown fixup kind");
+ assert((size_t)Kind <= std::size(Builtins) && "Unknown fixup kind");
return Builtins[Kind];
}
0, // length of DW_LNS_set_epilogue_begin
1 // DW_LNS_set_isa
};
- assert(array_lengthof(StandardOpcodeLengths) >=
+ assert(std::size(StandardOpcodeLengths) >=
(Params.DWARF2LineOpcodeBase - 1U));
return Emit(
MCOS, Params,
"ARM64_RELOC_ADDEND"
};
- if (RType >= array_lengthof(Table))
+ if (RType >= std::size(Table))
res = "Unknown";
else
res = Table[RType];
uint64_t value = de.getULEB128(cursor);
std::string description;
- if (value < array_lengthof(strings))
+ if (value < std::size(strings))
description = strings[value];
else if (value <= 12)
description = "8-byte alignment, " + utostr(1ULL << value) +
uint64_t value = de.getULEB128(cursor);
std::string description;
- if (value < array_lengthof(strings))
+ if (value < std::size(strings))
description = std::string(strings[value]);
else if (value <= 12)
description = std::string("8-byte stack alignment, ") +
// Fatal signals this crash-recovery code intercepts.
static const int Signals[] =
{ SIGABRT, SIGBUS, SIGFPE, SIGILL, SIGSEGV, SIGTRAP };
-static const unsigned NumSignals = array_lengthof(Signals);
+static const unsigned NumSignals = std::size(Signals);
// One saved sigaction per entry in Signals — presumably the prior handlers,
// restored when recovery is torn down; confirm against the installer code.
static struct sigaction PrevActions[NumSignals];
static void CrashRecoverySignalHandler(int Signal) {
std::max(static_cast<unsigned>(W), std::max(1u, Nibbles) + PrefixChars);
char NumberBuffer[kMaxWidth];
- ::memset(NumberBuffer, '0', llvm::array_lengthof(NumberBuffer));
+ ::memset(NumberBuffer, '0', std::size(NumberBuffer));
if (Prefix)
NumberBuffer[1] = 'x';
char *EndPtr = NumberBuffer + NumChars;
// If they are not there already, permute the components into their canonical
// positions by seeing if they parse as a valid architecture, and if so moving
// the component to the architecture position etc.
- for (unsigned Pos = 0; Pos != array_lengthof(Found); ++Pos) {
+ for (unsigned Pos = 0; Pos != std::size(Found); ++Pos) {
if (Found[Pos])
continue; // Already in the canonical position.
for (unsigned Idx = 0; Idx != Components.size(); ++Idx) {
// Do not reparse any components that already matched.
- if (Idx < array_lengthof(Found) && Found[Idx])
+ if (Idx < std::size(Found) && Found[Idx])
continue;
// Does this component parse as valid for the target position?
// components to the right.
for (unsigned i = Pos; !CurrentComponent.empty(); ++i) {
// Skip over any fixed components.
- while (i < array_lengthof(Found) && Found[i])
+ while (i < std::size(Found) && Found[i])
++i;
// Place the component at the new position, getting the component
// that was at this position - it will be moved right.
if (CurrentComponent.empty())
break;
// Advance to the next component, skipping any fixed components.
- while (++i < array_lengthof(Found) && Found[i])
+ while (++i < std::size(Found) && Found[i])
;
}
// The last component was pushed off the end - append it.
Components.push_back(CurrentComponent);
// Advance Idx to the component's new position.
- while (++Idx < array_lengthof(Found) && Found[Idx])
+ while (++Idx < std::size(Found) && Found[Idx])
;
} while (Idx < Pos); // Add more until the final position is reached.
}
};
static const size_t NumSigs =
- array_lengthof(IntSigs) + array_lengthof(KillSigs) +
- array_lengthof(InfoSigs) + 1 /* SIGPIPE */;
+ std::size(IntSigs) + std::size(KillSigs) +
+ std::size(InfoSigs) + 1 /* SIGPIPE */;
static std::atomic<unsigned> NumRegisteredSignals = ATOMIC_VAR_INIT(0);
enum class SignalKind { IsKill, IsInfo };
auto registerHandler = [&](int Signal, SignalKind Kind) {
unsigned Index = NumRegisteredSignals.load();
- assert(Index < array_lengthof(RegisteredSignalInfo) &&
+ assert(Index < std::size(RegisteredSignalInfo) &&
"Out of space for signal handlers!");
struct sigaction NewHandler;
#if defined(HAVE_BACKTRACE)
// Use backtrace() to output a backtrace on Linux systems with glibc.
if (!depth)
- depth = backtrace(StackTrace, static_cast<int>(array_lengthof(StackTrace)));
+ depth = backtrace(StackTrace, static_cast<int>(std::size(StackTrace)));
#endif
#if defined(HAVE__UNWIND_BACKTRACE)
// Try _Unwind_Backtrace() if backtrace() failed.
if (!depth)
depth = unwindBacktrace(StackTrace,
- static_cast<int>(array_lengthof(StackTrace)));
+ static_cast<int>(std::size(StackTrace)));
#endif
if (!depth)
return;
return true;
// Then compare against the list of ancient reserved names.
- for (size_t i = 0; i < array_lengthof(sReservedNames); ++i) {
+ for (size_t i = 0; i < std::size(sReservedNames); ++i) {
if (path.equals_insensitive(sReservedNames[i]))
return true;
}
if (StackFrame.AddrFrame.Offset == 0)
break;
StackTrace[Depth++] = (void *)(uintptr_t)StackFrame.AddrPC.Offset;
- if (Depth >= array_lengthof(StackTrace))
+ if (Depth >= std::size(StackTrace))
break;
}
}
constexpr FeatureBitset &operator&=(const FeatureBitset &RHS) {
- for (unsigned I = 0, E = array_lengthof(Bits); I != E; ++I) {
+ for (unsigned I = 0, E = std::size(Bits); I != E; ++I) {
// GCC <6.2 crashes if this is written in a single statement.
uint32_t NewBits = Bits[I] & RHS.Bits[I];
Bits[I] = NewBits;
}
constexpr FeatureBitset &operator|=(const FeatureBitset &RHS) {
- for (unsigned I = 0, E = array_lengthof(Bits); I != E; ++I) {
+ for (unsigned I = 0, E = std::size(Bits); I != E; ++I) {
// GCC <6.2 crashes if this is written in a single statement.
uint32_t NewBits = Bits[I] | RHS.Bits[I];
Bits[I] = NewBits;
// gcc 5.3 miscompiles this if we try to write this using operator&=.
// Elementwise AND of two bitsets, kept as an explicit per-word loop.
constexpr FeatureBitset operator&(const FeatureBitset &RHS) const {
  FeatureBitset Result;
  const unsigned NumWords = std::size(Bits);
  for (unsigned Idx = 0; Idx != NumWords; ++Idx)
    Result.Bits[Idx] = Bits[Idx] & RHS.Bits[Idx];
  return Result;
}
// gcc 5.3 miscompiles this if we try to write this using operator&=.
// Elementwise OR of two bitsets, kept as an explicit per-word loop.
constexpr FeatureBitset operator|(const FeatureBitset &RHS) const {
  FeatureBitset Result;
  const unsigned NumWords = std::size(Bits);
  for (unsigned Idx = 0; Idx != NumWords; ++Idx)
    Result.Bits[Idx] = Bits[Idx] | RHS.Bits[Idx];
  return Result;
}
// Bitwise complement: flip every word of the bitset.
constexpr FeatureBitset operator~() const {
  FeatureBitset Result;
  const unsigned NumWords = std::size(Bits);
  for (unsigned Idx = 0; Idx != NumWords; ++Idx)
    Result.Bits[Idx] = ~Bits[Idx];
  return Result;
}
constexpr bool operator!=(const FeatureBitset &RHS) const {
- for (unsigned I = 0, E = array_lengthof(Bits); I != E; ++I)
+ for (unsigned I = 0, E = std::size(Bits); I != E; ++I)
if (Bits[I] != RHS.Bits[I])
return true;
return false;
#include "llvm/Support/X86TargetParser.def"
std::numeric_limits<unsigned>::max() // Need to consume last comma.
};
- std::array<unsigned, array_lengthof(Priorities) - 1> HelperList;
+ std::array<unsigned, std::size(Priorities) - 1> HelperList;
std::iota(HelperList.begin(), HelperList.end(), 0);
assert(std::is_permutation(HelperList.begin(), HelperList.end(),
std::begin(Priorities),
C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C};
// Usually the indentation is small, handle it with a fastpath.
- if (NumChars < array_lengthof(Chars))
+ if (NumChars < std::size(Chars))
return OS.write(Chars, NumChars);
while (NumChars) {
- unsigned NumToWrite = std::min(NumChars,
- (unsigned)array_lengthof(Chars)-1);
+ unsigned NumToWrite = std::min(NumChars, (unsigned)std::size(Chars) - 1);
OS.write(Chars, NumToWrite);
NumChars -= NumToWrite;
}
static const MCPhysReg GPRArgRegs[] = { AArch64::X0, AArch64::X1, AArch64::X2,
AArch64::X3, AArch64::X4, AArch64::X5,
AArch64::X6, AArch64::X7 };
- unsigned NumGPRArgRegs = array_lengthof(GPRArgRegs);
+ unsigned NumGPRArgRegs = std::size(GPRArgRegs);
if (Subtarget->isWindowsArm64EC()) {
// In the ARM64EC ABI, only x0-x3 are used to pass arguments to varargs
// functions.
static const MCPhysReg FPRArgRegs[] = {
AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7};
- static const unsigned NumFPRArgRegs = array_lengthof(FPRArgRegs);
+ static const unsigned NumFPRArgRegs = std::size(FPRArgRegs);
unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(FPRArgRegs);
unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
};
// Out-of-line definition: number of entries in the unmangled-function table.
const unsigned UnmangledFuncInfo::TableSize =
- array_lengthof(UnmangledFuncInfo::Table);
+ std::size(UnmangledFuncInfo::Table);
static AMDGPULibFunc::Param getRetType(AMDGPULibFunc::EFuncId id,
const AMDGPULibFunc::Param (&Leads)[2]) {
}
StringMap<int> ManglingRule::buildManglingRulesMap() {
- StringMap<int> Map(array_lengthof(manglingRules));
+ StringMap<int> Map(std::size(manglingRules));
int Id = 0;
for (auto Rule : manglingRules)
Map.insert({Rule.Name, Id++});
R600::sub12, R600::sub13, R600::sub14, R600::sub15
};
- assert(Channel < array_lengthof(SubRegFromChannelTable));
+ assert(Channel < std::size(SubRegFromChannelTable));
return SubRegFromChannelTable[Channel];
}
ARC::R4, ARC::R5, ARC::R6, ARC::R7};
auto *AFI = MF.getInfo<ARCFunctionInfo>();
unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
- if (FirstVAReg < array_lengthof(ArgRegs)) {
+ if (FirstVAReg < std::size(ArgRegs)) {
int Offset = 0;
// Save remaining registers, storing higher register numbers at a higher
// address
- // There are (array_lengthof(ArgRegs) - FirstVAReg) registers which
+ // There are (std::size(ArgRegs) - FirstVAReg) registers which
// need to be saved.
- int VarFI =
- MFI.CreateFixedObject((array_lengthof(ArgRegs) - FirstVAReg) * 4,
- CCInfo.getNextStackOffset(), true);
+ int VarFI = MFI.CreateFixedObject((std::size(ArgRegs) - FirstVAReg) * 4,
+ CCInfo.getNextStackOffset(), true);
AFI->setVarArgsFrameIndex(VarFI);
SDValue FIN = DAG.getFrameIndex(VarFI, MVT::i32);
- for (unsigned i = FirstVAReg; i < array_lengthof(ArgRegs); i++) {
+ for (unsigned i = FirstVAReg; i < std::size(ArgRegs); i++) {
// Move argument from phys reg -> virt reg
unsigned VReg = RegInfo.createVirtualRegister(&ARC::GPR32RegClass);
RegInfo.addLiveIn(ArgRegs[i], VReg);
ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
: ARMGenInstrInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
Subtarget(STI) {
- for (unsigned i = 0, e = array_lengthof(ARM_MLxTable); i != e; ++i) {
+ for (unsigned i = 0, e = std::size(ARM_MLxTable); i != e; ++i) {
if (!MLxEntryMap.insert(std::make_pair(ARM_MLxTable[i].MLxOpc, i)).second)
llvm_unreachable("Duplicated entries?");
MLxHazardOpcodes.insert(ARM_MLxTable[i].AddSubOpc);
const TargetFrameLowering::SpillSlot *
ARMFrameLowering::getCalleeSavedSpillSlots(unsigned &NumEntries) const {
  // The only fixed spill slot is the FPCXTNS save at offset -4.
  static const SpillSlot Slots[] = {{ARM::FPCXTNS, -4}};
  NumEntries = std::size(Slots);
  return Slots;
}
int lastInsIndex = -1;
if (isVarArg && MFI.hasVAStart()) {
unsigned RegIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
- if (RegIdx != array_lengthof(GPRArgRegs))
+ if (RegIdx != std::size(GPRArgRegs))
ArgRegBegin = std::min(ArgRegBegin, (unsigned)GPRArgRegs[RegIdx]);
}
Thumb2SizeReduce::Thumb2SizeReduce(std::function<bool(const Function &)> Ftor)
: MachineFunctionPass(ID), PredicateFtor(std::move(Ftor)) {
OptimizeSize = MinimizeSize = false;
- for (unsigned i = 0, e = array_lengthof(ReduceTable); i != e; ++i) {
+ for (unsigned i = 0, e = std::size(ReduceTable); i != e; ++i) {
unsigned FromOpc = ReduceTable[i].WideOpc;
if (!ReduceOpcodeMap.insert(std::make_pair(FromOpc, i)).second)
llvm_unreachable("Duplicated entries?");
AVR::R24R23, AVR::R23R22,
AVR::R22R21, AVR::R21R20};
-static_assert(array_lengthof(RegList8AVR) == array_lengthof(RegList16AVR),
+static_assert(std::size(RegList8AVR) == std::size(RegList16AVR),
"8-bit and 16-bit register arrays must be of equal length");
-static_assert(array_lengthof(RegList8Tiny) == array_lengthof(RegList16Tiny),
+static_assert(std::size(RegList8Tiny) == std::size(RegList16Tiny),
"8-bit and 16-bit register arrays must be of equal length");
/// Analyze incoming and outgoing function arguments. We need custom C++ code
ArrayRef<MCPhysReg> RegList8;
ArrayRef<MCPhysReg> RegList16;
if (Tiny) {
- RegList8 = makeArrayRef(RegList8Tiny, array_lengthof(RegList8Tiny));
- RegList16 = makeArrayRef(RegList16Tiny, array_lengthof(RegList16Tiny));
+ RegList8 = makeArrayRef(RegList8Tiny, std::size(RegList8Tiny));
+ RegList16 = makeArrayRef(RegList16Tiny, std::size(RegList16Tiny));
} else {
- RegList8 = makeArrayRef(RegList8AVR, array_lengthof(RegList8AVR));
- RegList16 = makeArrayRef(RegList16AVR, array_lengthof(RegList16AVR));
+ RegList8 = makeArrayRef(RegList8AVR, std::size(RegList8AVR));
+ RegList16 = makeArrayRef(RegList16AVR, std::size(RegList16AVR));
}
unsigned NumArgs = Args.size();
ArrayRef<MCPhysReg> RegList8;
ArrayRef<MCPhysReg> RegList16;
if (Tiny) {
- RegList8 = makeArrayRef(RegList8Tiny, array_lengthof(RegList8Tiny));
- RegList16 = makeArrayRef(RegList16Tiny, array_lengthof(RegList16Tiny));
+ RegList8 = makeArrayRef(RegList8Tiny, std::size(RegList8Tiny));
+ RegList16 = makeArrayRef(RegList16Tiny, std::size(RegList16Tiny));
} else {
- RegList8 = makeArrayRef(RegList8AVR, array_lengthof(RegList8AVR));
- RegList16 = makeArrayRef(RegList16AVR, array_lengthof(RegList16AVR));
+ RegList8 = makeArrayRef(RegList8AVR, std::size(RegList8AVR));
+ RegList16 = makeArrayRef(RegList16AVR, std::size(RegList16AVR));
}
// GCC-ABI says that the size is rounded up to the next even number,
/* 28 */ 0, 0, UTIMERLO, UTIMERHI
};
- if (RegNo >= array_lengthof(CtrlRegDecoderTable))
+ if (RegNo >= std::size(CtrlRegDecoderTable))
return MCDisassembler::Fail;
static_assert(NoRegister == 0, "Expecting NoRegister to be 0");
/* 28 */ 0, 0, UTIMER, 0
};
- if (RegNo >= array_lengthof(CtrlReg64DecoderTable))
+ if (RegNo >= std::size(CtrlReg64DecoderTable))
return MCDisassembler::Fail;
static_assert(NoRegister == 0, "Expecting NoRegister to be 0");
/* 28 */ GPMUCNT2, GPMUCNT3, G30, G31
};
- if (RegNo >= array_lengthof(GuestRegDecoderTable))
+ if (RegNo >= std::size(GuestRegDecoderTable))
return MCDisassembler::Fail;
if (GuestRegDecoderTable[RegNo] == Hexagon::NoRegister)
return MCDisassembler::Fail;
/* 28 */ G29_28, 0, G31_30, 0
};
- if (RegNo >= array_lengthof(GuestReg64DecoderTable))
+ if (RegNo >= std::size(GuestReg64DecoderTable))
return MCDisassembler::Fail;
if (GuestReg64DecoderTable[RegNo] == Hexagon::NoRegister)
return MCDisassembler::Fail;
{ Hexagon::R25, -36 }, { Hexagon::R24, -40 }, { Hexagon::D12, -40 },
{ Hexagon::R27, -44 }, { Hexagon::R26, -48 }, { Hexagon::D13, -48 }
};
- NumEntries = array_lengthof(Offsets);
+ NumEntries = std::size(Offsets);
return Offsets;
}
static const unsigned Regs01[] = { LC0, SA0, LC1, SA1 };
static const unsigned Regs1[] = { LC1, SA1 };
- auto CheckRegs = IsInnerHWLoop ? makeArrayRef(Regs01, array_lengthof(Regs01))
- : makeArrayRef(Regs1, array_lengthof(Regs1));
+ auto CheckRegs = IsInnerHWLoop ? makeArrayRef(Regs01, std::size(Regs01))
+ : makeArrayRef(Regs1, std::size(Regs1));
for (unsigned R : CheckRegs)
if (MI->modifiesRegister(R, TRI))
return true;
SDNode *S = StoreInstrForLoadIntrinsic(L, C);
SDValue F[] = { SDValue(N,0), SDValue(N,1), SDValue(C,0), SDValue(C,1) };
SDValue T[] = { SDValue(L,0), SDValue(S,0), SDValue(L,1), SDValue(S,0) };
- ReplaceUses(F, T, array_lengthof(T));
+ ReplaceUses(F, T, std::size(T));
// This transformation will leave the intrinsic dead. If it remains in
// the DAG, the selection code will see it again, but without the load,
// and it will generate a store that is normally required for it.
Hexagon::R0, Hexagon::R1, Hexagon::R2,
Hexagon::R3, Hexagon::R4, Hexagon::R5
};
- const unsigned NumArgRegs = array_lengthof(ArgRegs);
+ const unsigned NumArgRegs = std::size(ArgRegs);
unsigned RegNum = State.getFirstUnallocated(ArgRegs);
// RegNum is an index into ArgRegs: skip a register if RegNum is odd.
Hexagon::fixup_Hexagon_GPREL16_0, Hexagon::fixup_Hexagon_GPREL16_1,
Hexagon::fixup_Hexagon_GPREL16_2, Hexagon::fixup_Hexagon_GPREL16_3
};
- assert(Shift < array_lengthof(GPRelFixups));
+ assert(Shift < std::size(GPRelFixups));
auto UsesGP = [] (const MCInstrDesc &D) {
for (const MCPhysReg *U = D.getImplicitUses(); U && *U; ++U)
if (*U == Hexagon::GP)
}
// FPR32 and FPR64 alias each other.
- if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s))
+ if (State.getFirstUnallocated(ArgFPR32s) == std::size(ArgFPR32s))
UseGPRForFloat = true;
if (UseGPRForFloat && ValVT == MVT::f32) {
DL.getTypeAllocSize(OrigTy) == TwoGRLenInBytes) {
unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
// Skip 'odd' register if necessary.
- if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
+ if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
State.AllocateReg(ArgGPRs);
}
// TODO: Add more fixup kinds.
};
- static_assert((array_lengthof(Infos)) == LoongArch::NumTargetFixupKinds,
+ static_assert((std::size(Infos)) == LoongArch::NumTargetFixupKinds,
"Not all fixup kinds added to Infos array");
// Fixup kinds from .reloc directive are like R_LARCH_NONE. They
{"fixup_8", 0, 8, 0},
{"fixup_sym_diff", 0, 32, 0},
};
- static_assert((array_lengthof(Infos)) == MSP430::NumTargetFixupKinds,
+ static_assert((std::size(Infos)) == MSP430::NumTargetFixupKinds,
"Not all fixup kinds added to Infos array");
-
+
if (Kind < FirstTargetFixupKind)
return MCAsmBackend::getFixupKindInfo(Kind);
static const MCPhysReg CRegList[] = {
MSP430::R12, MSP430::R13, MSP430::R14, MSP430::R15
};
- static const unsigned CNbRegs = array_lengthof(CRegList);
+ static const unsigned CNbRegs = std::size(CRegList);
static const MCPhysReg BuiltinRegList[] = {
MSP430::R8, MSP430::R9, MSP430::R10, MSP430::R11,
MSP430::R12, MSP430::R13, MSP430::R14, MSP430::R15
};
- static const unsigned BuiltinNbRegs = array_lengthof(BuiltinRegList);
+ static const unsigned BuiltinNbRegs = std::size(BuiltinRegList);
ArrayRef<MCPhysReg> RegList;
unsigned NbRegs;
{ "fixup_Mips_JALR", 0, 32, 0 },
{ "fixup_MICROMIPS_JALR", 0, 32, 0 }
};
- static_assert(array_lengthof(LittleEndianInfos) == Mips::NumTargetFixupKinds,
+ static_assert(std::size(LittleEndianInfos) == Mips::NumTargetFixupKinds,
"Not all MIPS little endian fixup kinds added!");
const static MCFixupKindInfo BigEndianInfos[] = {
{ "fixup_Mips_JALR", 0, 32, 0 },
{ "fixup_MICROMIPS_JALR", 0, 32, 0 }
};
- static_assert(array_lengthof(BigEndianInfos) == Mips::NumTargetFixupKinds,
+ static_assert(std::size(BigEndianInfos) == Mips::NumTargetFixupKinds,
"Not all MIPS big endian fixup kinds added!");
if (Kind >= FirstLiteralRelocationKind)
}
void Mips16TargetLowering::setMips16HardFloatLibCalls() {
- for (unsigned I = 0; I != array_lengthof(HardFloatLibCalls); ++I) {
+ for (unsigned I = 0; I != std::size(HardFloatLibCalls); ++I) {
assert((I == 0 || HardFloatLibCalls[I - 1] < HardFloatLibCalls[I]) &&
"Array not sorted!");
if (HardFloatLibCalls[I].Libcall != RTLIB::UNKNOWN_LIBCALL)
PPC::R3, PPC::R4, PPC::R5, PPC::R6,
PPC::R7, PPC::R8, PPC::R9, PPC::R10,
};
- const unsigned NumArgRegs = array_lengthof(ArgRegs);
+ const unsigned NumArgRegs = std::size(ArgRegs);
unsigned RegNum = State.getFirstUnallocated(ArgRegs);
PPC::R3, PPC::R4, PPC::R5, PPC::R6,
PPC::R7, PPC::R8, PPC::R9, PPC::R10,
};
- const unsigned NumArgRegs = array_lengthof(ArgRegs);
+ const unsigned NumArgRegs = std::size(ArgRegs);
unsigned RegNum = State.getFirstUnallocated(ArgRegs);
int RegsLeft = NumArgRegs - RegNum;
PPC::F8
};
- const unsigned NumArgRegs = array_lengthof(ArgRegs);
+ const unsigned NumArgRegs = std::size(ArgRegs);
unsigned RegNum = State.getFirstUnallocated(ArgRegs);
CALLEE_SAVED_FPRS, CALLEE_SAVED_GPRS64, CALLEE_SAVED_VRS};
if (Subtarget.is64BitELFABI()) {
- NumEntries = array_lengthof(ELFOffsets64);
+ NumEntries = std::size(ELFOffsets64);
return ELFOffsets64;
}
if (Subtarget.is32BitELFABI()) {
- NumEntries = array_lengthof(ELFOffsets32);
+ NumEntries = std::size(ELFOffsets32);
return ELFOffsets32;
}
assert(Subtarget.isAIXABI() && "Unexpected ABI.");
if (Subtarget.isPPC64()) {
- NumEntries = array_lengthof(AIXOffsets64);
+ NumEntries = std::size(AIXOffsets64);
return AIXOffsets64;
}
- NumEntries = array_lengthof(AIXOffsets32);
+ NumEntries = std::size(AIXOffsets32);
return AIXOffsets32;
}
PPC::R3, PPC::R4, PPC::R5, PPC::R6,
PPC::R7, PPC::R8, PPC::R9, PPC::R10,
};
- const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
+ const unsigned NumGPArgRegs = std::size(GPArgRegs);
static const MCPhysReg FPArgRegs[] = {
PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
PPC::F8
};
- unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
+ unsigned NumFPArgRegs = std::size(FPArgRegs);
if (useSoftFloat() || hasSPE())
NumFPArgRegs = 0;
PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
};
- const unsigned Num_GPR_Regs = array_lengthof(GPR);
+ const unsigned Num_GPR_Regs = std::size(GPR);
const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
- const unsigned Num_VR_Regs = array_lengthof(VR);
+ const unsigned Num_VR_Regs = std::size(VR);
// Do a first pass over the arguments to determine whether the ABI
// guarantees that our caller has allocated the parameter save area
PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
};
- const unsigned NumGPRs = array_lengthof(GPR);
+ const unsigned NumGPRs = std::size(GPR);
const unsigned NumFPRs = 13;
- const unsigned NumVRs = array_lengthof(VR);
+ const unsigned NumVRs = std::size(VR);
const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
unsigned NumBytes = LinkageSize;
PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
};
- const unsigned NumGPRs = array_lengthof(GPR);
+ const unsigned NumGPRs = std::size(GPR);
const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
- const unsigned NumVRs = array_lengthof(VR);
+ const unsigned NumVRs = std::size(VR);
// On ELFv2, we can avoid allocating the parameter area if all the arguments
// can be passed to the callee in registers.
static const MCPhysReg GPR_64[] = {PPC::X3, PPC::X4, PPC::X5, PPC::X6,
PPC::X7, PPC::X8, PPC::X9, PPC::X10};
- const unsigned NumGPArgRegs = array_lengthof(IsPPC64 ? GPR_64 : GPR_32);
+ const unsigned NumGPArgRegs = std::size(IsPPC64 ? GPR_64 : GPR_32);
// The fixed integer arguments of a variadic function are stored to the
// VarArgsFrameIndex on the stack so that they may be loaded by
-8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
};
- for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
+ for (unsigned idx = 0; idx < std::size(SplatCsts); ++idx) {
// Indirect through the SplatCsts array so that we favor 'vsplti -1' for
// cases which are ambiguous (e.g. formation of 0x8000_0000). 'vsplti -1'
int i = SplatCsts[idx];
// Check if an opcode is a FMA instruction. If it is, return the index in array
// FMAOpIdxInfo. Otherwise, return -1.
int16_t PPCInstrInfo::getFMAOpIdxInfo(unsigned Opcode) const {
- for (unsigned I = 0; I < array_lengthof(FMAOpIdxInfo); I++)
+ for (unsigned I = 0; I < std::size(FMAOpIdxInfo); I++)
if (FMAOpIdxInfo[I][InfoArrayIdxFMAInst] == Opcode)
return I;
return -1;
bool Found = false;
for (const MachineOperand &MO : MI.operands()) {
- for (unsigned c = 0; c < array_lengthof(RCs) && !Found; ++c) {
+ for (unsigned c = 0; c < std::size(RCs) && !Found; ++c) {
const TargetRegisterClass *RC = RCs[c];
if (MO.isReg()) {
if (MO.isDef() && RC->contains(MO.getReg())) {
{"fixup_riscv_set_6b", 2, 6, 0},
{"fixup_riscv_sub_6b", 2, 6, 0},
};
- static_assert((array_lengthof(Infos)) == RISCV::NumTargetFixupKinds,
+ static_assert((std::size(Infos)) == RISCV::NumTargetFixupKinds,
"Not all fixup kinds added to Infos array");
// Fixup kinds from .reloc directive are like R_RISCV_NONE. They
}
// FPR16, FPR32, and FPR64 alias each other.
- if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
+ if (State.getFirstUnallocated(ArgFPR32s) == std::size(ArgFPR32s)) {
UseGPRForF16_F32 = true;
UseGPRForF64 = true;
}
DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
// Skip 'odd' register if necessary.
- if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
+ if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
State.AllocateReg(ArgGPRs);
}
static DecodeStatus DecodePRRegsRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const MCDisassembler *Decoder) {
- if (RegNo >= array_lengthof(PRRegDecoderTable))
+ if (RegNo >= std::size(PRRegDecoderTable))
return MCDisassembler::Fail;
Inst.addOperand(MCOperand::createReg(PRRegDecoderTable[RegNo]));
return MCDisassembler::Success;
{VE::SX25, 104}, {VE::SX26, 112}, {VE::SX27, 120}, {VE::SX28, 128},
{VE::SX29, 136}, {VE::SX30, 144}, {VE::SX31, 152}, {VE::SX32, 160},
{VE::SX33, 168}};
- NumEntries = array_lengthof(Offsets);
+ NumEntries = std::size(Offsets);
return Offsets;
}
}
}
- for (unsigned I = 0, E = array_lengthof(Match); I != E; ++I) {
+ for (unsigned I = 0, E = std::size(Match); I != E; ++I) {
Tmp.back() = Suffixes[I];
if (MemOp && HasVectorReg)
MemOp->Mem.Size = MemSize[I];
if (NumSuccessfulMatches > 1) {
char MatchChars[4];
unsigned NumMatches = 0;
- for (unsigned I = 0, E = array_lengthof(Match); I != E; ++I)
+ for (unsigned I = 0, E = std::size(Match); I != E; ++I)
if (Match[I] == Match_Success)
MatchChars[NumMatches++] = Suffixes[I];
continue;
bool IsAnyViable = false;
- for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
+ for (unsigned j = 0; j != std::size(ViableForN); ++j)
if (ViableForN[j]) {
uint64_t N = j + 1;
break;
}
- for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
+ for (unsigned j = 0; j != std::size(ViableForN); ++j)
if (ViableForN[j])
return j + 1;
};
XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
- if (FirstVAReg < array_lengthof(ArgRegs)) {
+ if (FirstVAReg < std::size(ArgRegs)) {
int offset = 0;
// Save remaining registers, storing higher register numbers at a higher
// address
- for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
+ for (int i = std::size(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
// Create a stack slot
int FI = MFI.CreateFixedObject(4, offset, true);
if (i == (int)FirstVAReg) {
}
}
- assert((NextTmpIdx <= array_lengthof(TmpResult) + 1) &&
- "out-of-bound access");
+ assert((NextTmpIdx <= std::size(TmpResult) + 1) && "out-of-bound access");
Value *Result;
if (!SimpVect.empty())
Renamer(unsigned int seed) { prng.srand(seed); }
const char *newName() {
- return metaNames[prng.rand() % array_lengthof(metaNames)];
+ return metaNames[prng.rand() % std::size(metaNames)];
}
PRNG prng;
"Reserved",
};
outs() << "\nThe Data Directory\n";
- for (uint32_t I = 0; I != array_lengthof(DirName); ++I) {
+ for (uint32_t I = 0; I != std::size(DirName); ++I) {
uint32_t Addr = 0, Size = 0;
if (const data_directory *Data = Obj.getDataDirectory(I)) {
Addr = Data->RelativeVirtualAddress;
bool Terminated = false;
for (unsigned OI = Offset, OE = Opcodes.size(); !Terminated && OI < OE; ) {
for (unsigned DI = 0;; ++DI) {
- if ((isAArch64 && (DI >= array_lengthof(Ring64))) ||
- (!isAArch64 && (DI >= array_lengthof(Ring)))) {
+ if ((isAArch64 && (DI >= std::size(Ring64))) ||
+ (!isAArch64 && (DI >= std::size(Ring)))) {
SW.startLine() << format("0x%02x ; Bad opcode!\n",
Opcodes.data()[OI]);
++OI;
"Linux", "Hurd", "Solaris", "FreeBSD", "NetBSD", "Syllable", "NaCl",
};
StringRef OSName = "Unknown";
- if (Words[0] < array_lengthof(OSNames))
+ if (Words[0] < std::size(OSNames))
OSName = OSNames[Words[0]];
uint32_t Major = Words[1], Minor = Words[2], Patch = Words[3];
std::string str;
};
// Convert payload integer to decimal string representation.
- std::string NaNPayloadDecStrings[array_lengthof(NaNPayloads)];
- for (size_t I = 0; I < array_lengthof(NaNPayloads); ++I)
+ std::string NaNPayloadDecStrings[std::size(NaNPayloads)];
+ for (size_t I = 0; I < std::size(NaNPayloads); ++I)
NaNPayloadDecStrings[I] = utostr(NaNPayloads[I]);
// Convert payload integer to hexadecimal string representation.
- std::string NaNPayloadHexStrings[array_lengthof(NaNPayloads)];
- for (size_t I = 0; I < array_lengthof(NaNPayloads); ++I)
+ std::string NaNPayloadHexStrings[std::size(NaNPayloads)];
+ for (size_t I = 0; I < std::size(NaNPayloads); ++I)
NaNPayloadHexStrings[I] = "0x" + utohexstr(NaNPayloads[I]);
// Fix payloads to expected result.
for (char TypeChar : NaNTypes) {
bool Signaling = (TypeChar == 's' || TypeChar == 'S');
- for (size_t J = 0; J < array_lengthof(NaNPayloads); ++J) {
+ for (size_t J = 0; J < std::size(NaNPayloads); ++J) {
uint64_t Payload = (Signaling && !NaNPayloads[J]) ? SNaNDefaultPayload
: NaNPayloads[J];
std::string &PayloadDec = NaNPayloadDecStrings[J];
{ MSmallestNormalized, MSmallestNormalized, "-0x1p-125", APFloat::opOK, APFloat::fcNormal }
};
- for (size_t i = 0; i < array_lengthof(SpecialCaseTests); ++i) {
+ for (size_t i = 0; i < std::size(SpecialCaseTests); ++i) {
APFloat x(SpecialCaseTests[i].x);
APFloat y(SpecialCaseTests[i].y);
APFloat::opStatus status = x.add(y, APFloat::rmNearestTiesToEven);
{ MSmallestNormalized, MSmallestNormalized, "0x0p+0", APFloat::opOK, APFloat::fcZero }
};
- for (size_t i = 0; i < array_lengthof(SpecialCaseTests); ++i) {
+ for (size_t i = 0; i < std::size(SpecialCaseTests); ++i) {
APFloat x(SpecialCaseTests[i].x);
APFloat y(SpecialCaseTests[i].y);
APFloat::opStatus status = x.subtract(y, APFloat::rmNearestTiesToEven);
APFloat::rmNearestTiesToAway},
};
- for (size_t i = 0; i < array_lengthof(SpecialCaseTests); ++i) {
+ for (size_t i = 0; i < std::size(SpecialCaseTests); ++i) {
APFloat x(SpecialCaseTests[i].x);
APFloat y(SpecialCaseTests[i].y);
APFloat::opStatus status = x.multiply(y, SpecialCaseTests[i].roundingMode);
APFloat::rmNearestTiesToAway},
};
- for (size_t i = 0; i < array_lengthof(SpecialCaseTests); ++i) {
+ for (size_t i = 0; i < std::size(SpecialCaseTests); ++i) {
APFloat x(SpecialCaseTests[i].x);
APFloat y(SpecialCaseTests[i].y);
APFloat::opStatus status = x.divide(y, SpecialCaseTests[i].roundingMode);
{ MVal6, MVal6, "-0x0p+0", APFloat::opOK, APFloat::fcZero },
};
- for (size_t i = 0; i < array_lengthof(SpecialCaseTests); ++i) {
+ for (size_t i = 0; i < std::size(SpecialCaseTests); ++i) {
APFloat x(SpecialCaseTests[i].x);
APFloat y(SpecialCaseTests[i].y);
APFloat::opStatus status = x.remainder(y);
uint32_t U32;
uint64_t U64;
- for (size_t i = 0; i < array_lengthof(Unsigned); ++i) {
+ for (size_t i = 0; i < std::size(Unsigned); ++i) {
bool U8Success = StringRef(Unsigned[i].Str).getAsInteger(0, U8);
if (static_cast<uint8_t>(Unsigned[i].Expected) == Unsigned[i].Expected) {
ASSERT_FALSE(U8Success);
int32_t S32;
int64_t S64;
- for (size_t i = 0; i < array_lengthof(Signed); ++i) {
+ for (size_t i = 0; i < std::size(Signed); ++i) {
bool S8Success = StringRef(Signed[i].Str).getAsInteger(0, S8);
if (static_cast<int8_t>(Signed[i].Expected) == Signed[i].Expected) {
ASSERT_FALSE(S8Success);
TEST(StringRefTest, getAsUnsignedIntegerBadStrings) {
unsigned long long U64;
- for (size_t i = 0; i < array_lengthof(BadStrings); ++i) {
+ for (size_t i = 0; i < std::size(BadStrings); ++i) {
bool IsBadNumber = StringRef(BadStrings[i]).getAsInteger(0, U64);
ASSERT_TRUE(IsBadNumber);
}
uint32_t U32;
uint64_t U64;
- for (size_t i = 0; i < array_lengthof(ConsumeUnsigned); ++i) {
+ for (size_t i = 0; i < std::size(ConsumeUnsigned); ++i) {
StringRef Str = ConsumeUnsigned[i].Str;
bool U8Success = Str.consumeInteger(0, U8);
if (static_cast<uint8_t>(ConsumeUnsigned[i].Expected) ==
int32_t S32;
int64_t S64;
- for (size_t i = 0; i < array_lengthof(ConsumeSigned); ++i) {
+ for (size_t i = 0; i < std::size(ConsumeSigned); ++i) {
StringRef Str = ConsumeSigned[i].Str;
bool S8Success = Str.consumeInteger(0, S8);
if (static_cast<int8_t>(ConsumeSigned[i].Expected) ==
TEST(StringRefTest, joinStrings) {
std::vector<StringRef> v1;
std::vector<std::string> v2;
- for (size_t i = 0; i < array_lengthof(join_input); ++i) {
+ for (size_t i = 0; i < std::size(join_input); ++i) {
v1.push_back(join_input[i]);
v2.push_back(join_input[i]);
}
std::vector<PtrT> TestPtrs;
TinyPtrVectorTest() {
- for (size_t i = 0, e = array_lengthof(TestValues); i != e; ++i)
+ for (size_t i = 0, e = std::size(TestValues); i != e; ++i)
TestPtrs.push_back(PtrT(&TestValues[i]));
std::shuffle(TestPtrs.begin(), TestPtrs.end(), std::mt19937{});
TEST_P(CoverageMappingTest, correct_deserialize_for_more_than_two_files) {
const char *FileNames[] = {"bar", "baz", "foo"};
- static const unsigned N = array_lengthof(FileNames);
+ static const unsigned N = std::size(FileNames);
startFunction("func", 0x1234);
for (unsigned I = 0; I < N; ++I)
ProfileWriter.addRecord({"func", 0x1234, {0}}, Err);
const char *FileNames[] = {"bar", "baz", "foo"};
- static const unsigned N = array_lengthof(FileNames);
+ static const unsigned N = std::size(FileNames);
startFunction("func", 0x1234);
for (unsigned I = 0; I < N; ++I)
};
constexpr endianness Endians[] = {big, little, native};
-constexpr uint32_t NumEndians = llvm::array_lengthof(Endians);
+constexpr uint32_t NumEndians = std::size(Endians);
constexpr uint32_t NumStreams = 2 * NumEndians;
class BinaryStreamTest : public testing::Test {
{"10e60", "no10e60", "+10e60", "-10e60"},
};
- for (unsigned i = 0; i < array_lengthof(ArchExt); i++) {
+ for (unsigned i = 0; i < std::size(ArchExt); i++) {
EXPECT_EQ(StringRef(ArchExt[i][2]), CSKY::getArchExtFeature(ArchExt[i][0]));
EXPECT_EQ(StringRef(ArchExt[i][3]), CSKY::getArchExtFeature(ArchExt[i][1]));
}
{ "-tool", "-alias", "x" }
};
- for (size_t i = 0, e = array_lengthof(Inputs); i < e; ++i) {
+ for (size_t i = 0, e = std::size(Inputs); i < e; ++i) {
StackOption<std::string> Actual("actual");
StackOption<bool> Extra("extra");
StackOption<std::string> Input(cl::Positional);
TEST(CommandLineTest, AliasRequired) {
const char *opts1[] = { "-tool", "-option=x" };
const char *opts2[] = { "-tool", "-o", "x" };
- testAliasRequired(array_lengthof(opts1), opts1);
- testAliasRequired(array_lengthof(opts2), opts2);
+ testAliasRequired(std::size(opts1), opts1);
+ testAliasRequired(std::size(opts2), opts2);
}
TEST(CommandLineTest, HideUnrelatedOptions) {
TEST(CommandLineTest, SetMultiValues) {
StackOption<int> Option("option");
const char *args[] = {"prog", "-option=1", "-option=2"};
- EXPECT_TRUE(cl::ParseCommandLineOptions(array_lengthof(args), args,
- StringRef(), &llvm::nulls()));
+ EXPECT_TRUE(cl::ParseCommandLineOptions(std::size(args), args, StringRef(),
+ &llvm::nulls()));
EXPECT_EQ(Option, 2);
}
/*CurrentDir=*/StringRef(TestRoot), FS));
const char *Expected[] = {"clang", "-Xclang", "-Wno-whatever", nullptr,
"input.cpp"};
- ASSERT_EQ(array_lengthof(Expected), Argv.size());
- for (size_t I = 0, E = array_lengthof(Expected); I < E; ++I) {
+ ASSERT_EQ(std::size(Expected), Argv.size());
+ for (size_t I = 0, E = std::size(Expected); I < E; ++I) {
if (Expected[I] == nullptr) {
ASSERT_EQ(Argv[I], nullptr);
} else {
std::string S;
llvm::raw_string_ostream Stream(S);
- Stream << formatv(Intro, std::tuple_size<Tuple>::value,
- llvm::array_lengthof(Ts))
+ Stream << formatv(Intro, std::tuple_size<Tuple>::value, std::size(Ts))
<< "\n";
Stream << formatv(Header, "Char", "HexInt", "Str", "Ref", "std::str",
"double", "float", "pointer", "comma", "exp", "bigint",
{"mve", "nomve", "+mve", "-mve"},
{"mve.fp", "nomve.fp", "+mve.fp", "-mve.fp"}};
- for (unsigned i = 0; i < array_lengthof(ArchExt); i++) {
+ for (unsigned i = 0; i < std::size(ArchExt); i++) {
EXPECT_EQ(StringRef(ArchExt[i][2]), ARM::getArchExtFeature(ArchExt[i][0]));
EXPECT_EQ(StringRef(ArchExt[i][3]), ARM::getArchExtFeature(ArchExt[i][1]));
}
TEST(TargetParserTest, ARMparseHWDiv) {
const char *hwdiv[] = {"thumb", "arm", "arm,thumb", "thumb,arm"};
- for (unsigned i = 0; i < array_lengthof(hwdiv); i++)
+ for (unsigned i = 0; i < std::size(hwdiv); i++)
EXPECT_NE(ARM::AEK_INVALID, ARM::parseHWDiv((StringRef)hwdiv[i]));
}
"v8.7a", "v8.8-a", "v8.8a", "v8-r", "v8m.base", "v8m.main",
"v8.1m.main"};
- for (unsigned i = 0; i < array_lengthof(Arch); i++) {
+ for (unsigned i = 0; i < std::size(Arch); i++) {
std::string arm_1 = "armeb" + (std::string)(Arch[i]);
std::string arm_2 = "arm" + (std::string)(Arch[i]) + "eb";
std::string arm_3 = "arm" + (std::string)(Arch[i]);
}
TEST(TargetParserTest, ARMparseArchProfile) {
- for (unsigned i = 0; i < array_lengthof(ARMArch); i++) {
+ for (unsigned i = 0; i < std::size(ARMArch); i++) {
switch (ARM::parseArch(ARMArch[i])) {
case ARM::ArchKind::ARMV6M:
case ARM::ArchKind::ARMV7M:
}
TEST(TargetParserTest, ARMparseArchVersion) {
- for (unsigned i = 0; i < array_lengthof(ARMArch); i++)
+ for (unsigned i = 0; i < std::size(ARMArch); i++)
if (((std::string)ARMArch[i]).substr(0, 4) == "armv")
EXPECT_EQ((ARMArch[i][4] - 48u), ARM::parseArchVersion(ARMArch[i]));
else
{"pmuv3", "nopmuv3", "+perfmon", "-perfmon"},
};
- for (unsigned i = 0; i < array_lengthof(ArchExt); i++) {
+ for (unsigned i = 0; i < std::size(ArchExt); i++) {
EXPECT_EQ(StringRef(ArchExt[i][2]),
AArch64::getArchExtFeature(ArchExt[i][0]));
EXPECT_EQ(StringRef(ArchExt[i][3]),
if (!MappedFile)
return windows_error(::GetLastError());
- Success = ::GetMappedFileNameA(::GetCurrentProcess(),
- MappedFile,
- Filename,
- array_lengthof(Filename) - 1);
+ Success = ::GetMappedFileNameA(::GetCurrentProcess(), MappedFile, Filename,
+ std::size(Filename) - 1);
if (!Success)
return windows_error(::GetLastError());
LPCSTR Extension = NULL;
if (ext.size() && ext[0] == '.')
Extension = ext.c_str();
- DWORD length = ::SearchPathA(NULL,
- Program.c_str(),
- Extension,
- array_lengthof(PathName),
- PathName,
- NULL);
+ DWORD length = ::SearchPathA(NULL, Program.c_str(), Extension,
+ std::size(PathName), PathName, NULL);
if (length == 0)
ec = windows_error(::GetLastError());
- else if (length > array_lengthof(PathName)) {
+ else if (length > std::size(PathName)) {
// This may have been the file, return with error.
ec = windows_error(ERROR_BUFFER_OVERFLOW);
break;
O.indent(2) << " makeArrayRef(OpToPatterns),\n";
O.indent(2) << " makeArrayRef(Patterns),\n";
O.indent(2) << " makeArrayRef(Conds),\n";
- O.indent(2) << " StringRef(AsmStrings, array_lengthof(AsmStrings)),\n";
+ O.indent(2) << " StringRef(AsmStrings, std::size(AsmStrings)),\n";
if (MCOpPredicates.empty())
O.indent(2) << " nullptr,\n";
else
nullptr};
unsigned CodeGenTarget::getNumFixedInstructions() {
- return array_lengthof(FixedInstrs) - 1;
+ return std::size(FixedInstrs) - 1;
}
/// Return all of the instructions defined by the target, ordered by
OS << "extern const unsigned " << Namespace
<< (j == 0 ? "DwarfFlavour" : "EHFlavour") << I << "Dwarf2LSize";
if (!isCtor)
- OS << " = array_lengthof(" << Namespace
+ OS << " = std::size(" << Namespace
<< (j == 0 ? "DwarfFlavour" : "EHFlavour") << I << "Dwarf2L);\n\n";
else
OS << ";\n\n";
OS << "extern const unsigned " << Namespace
<< (j == 0 ? "DwarfFlavour" : "EHFlavour") << i << "L2DwarfSize";
if (!isCtor)
- OS << " = array_lengthof(" << Namespace
+ OS << " = std::size(" << Namespace
<< (j == 0 ? "DwarfFlavour" : "EHFlavour") << i << "L2Dwarf);\n\n";
else
OS << ";\n\n";
}
DisassemblerTables::DisassemblerTables() {
- for (unsigned i = 0; i < llvm::array_lengthof(Tables); i++)
+ for (unsigned i = 0; i < std::size(Tables); i++)
Tables[i] = std::make_unique<ContextDecision>();
HasConflicts = false;