From: David Majnemer <david.majnemer@gmail.com>
Date: Thu, 11 Aug 2016 21:15:00 +0000 (+0000)
Subject: Use range algorithms instead of unpacking begin/end
X-Git-Tag: llvmorg-4.0.0-rc1~12681
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=0a16c228463ca58c7828b5fd9bdd10b3ba612bbf;p=platform%2Fupstream%2Fllvm.git

Use range algorithms instead of unpacking begin/end

No functionality change is intended. (A standalone sketch of the
range-helper idiom appears after the diff.)

llvm-svn: 278417
---

diff --git a/llvm/include/llvm/ADT/ArrayRef.h b/llvm/include/llvm/ADT/ArrayRef.h
index 1163a9e..bfe8514 100644
--- a/llvm/include/llvm/ADT/ArrayRef.h
+++ b/llvm/include/llvm/ADT/ArrayRef.h
@@ -13,6 +13,7 @@
 #include "llvm/ADT/Hashing.h"
 #include "llvm/ADT/None.h"
 #include "llvm/ADT/SmallVector.h"
+#include <array>
 #include <vector>
 
 namespace llvm {
@@ -78,6 +79,11 @@ namespace llvm {
     /*implicit*/ ArrayRef(const std::vector<T, A> &Vec)
       : Data(Vec.data()), Length(Vec.size()) {}
 
+    /// Construct an ArrayRef from a std::array
+    template <size_t N>
+    /*implicit*/ LLVM_CONSTEXPR ArrayRef(const std::array<T, N> &Arr)
+        : Data(Arr.data()), Length(N) {}
+
     /// Construct an ArrayRef from a C array.
     template <size_t N>
     /*implicit*/ LLVM_CONSTEXPR ArrayRef(const T (&Arr)[N])
@@ -257,6 +263,11 @@ namespace llvm {
     /*implicit*/ MutableArrayRef(std::vector<T> &Vec)
     : ArrayRef<T>(Vec) {}
 
+    /// Construct an ArrayRef from a std::array
+    template <size_t N>
+    /*implicit*/ LLVM_CONSTEXPR MutableArrayRef(std::array<T, N> &Arr)
+        : ArrayRef<T>(Arr) {}
+
     /// Construct an MutableArrayRef from a C array.
     template <size_t N>
     /*implicit*/ LLVM_CONSTEXPR MutableArrayRef(T (&Arr)[N])
diff --git a/llvm/include/llvm/Support/CommandLine.h b/llvm/include/llvm/Support/CommandLine.h
index 70465a0..a0ec262 100644
--- a/llvm/include/llvm/Support/CommandLine.h
+++ b/llvm/include/llvm/Support/CommandLine.h
@@ -21,6 +21,7 @@
 #define LLVM_SUPPORT_COMMANDLINE_H
 
 #include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/StringMap.h"
@@ -278,7 +279,7 @@ public:
     return getNumOccurrencesFlag() == cl::ConsumeAfter;
   }
   bool isInAllSubCommands() const {
-    return std::any_of(Subs.begin(), Subs.end(), [](const SubCommand *SC) {
+    return any_of(Subs, [](const SubCommand *SC) {
       return SC == &*AllSubCommands;
     });
   }
diff --git a/llvm/lib/Analysis/CodeMetrics.cpp b/llvm/lib/Analysis/CodeMetrics.cpp
index 576dca4..8e79503 100644
--- a/llvm/lib/Analysis/CodeMetrics.cpp
+++ b/llvm/lib/Analysis/CodeMetrics.cpp
@@ -45,8 +45,7 @@ static void completeEphemeralValues(SmallVector<const Value *, 16> &WorkSet,
       continue;
 
     // If all uses of this value are ephemeral, then so is this value.
-    if (!std::all_of(V->user_begin(), V->user_end(),
-                     [&](const User *U) { return EphValues.count(U); }))
+    if (!all_of(V->users(), [&](const User *U) { return EphValues.count(U); }))
       continue;
 
     EphValues.insert(V);
diff --git a/llvm/lib/Analysis/GlobalsModRef.cpp b/llvm/lib/Analysis/GlobalsModRef.cpp
index a7cf134..8289e58 100644
--- a/llvm/lib/Analysis/GlobalsModRef.cpp
+++ b/llvm/lib/Analysis/GlobalsModRef.cpp
@@ -857,22 +857,22 @@ ModRefInfo GlobalsAAResult::getModRefInfoForArgument(ImmutableCallSite CS,
   if (CS.doesNotAccessMemory())
     return MRI_NoModRef;
   ModRefInfo ConservativeResult = CS.onlyReadsMemory() ? MRI_Ref : MRI_ModRef;
- 
+
   // Iterate through all the arguments to the called function. If any argument
   // is based on GV, return the conservative result.
   for (auto &A : CS.args()) {
     SmallVector<Value *, 4> Objects;
     GetUnderlyingObjects(A, Objects, DL);
- 
+
     // All objects must be identified.
-    if (!std::all_of(Objects.begin(), Objects.end(), isIdentifiedObject) &&
+    if (!all_of(Objects, isIdentifiedObject) &&
         // Try ::alias to see if all objects are known not to alias GV.
-        !std::all_of(Objects.begin(), Objects.end(), [&](Value *V) {
+        !all_of(Objects, [&](Value *V) {
           return this->alias(MemoryLocation(V), MemoryLocation(GV)) == NoAlias;
-        }))
+        }))
       return ConservativeResult;
 
-    if (std::find(Objects.begin(), Objects.end(), GV) != Objects.end())
+    if (is_contained(Objects, GV))
       return ConservativeResult;
   }
 
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index e6aed6d..95273a7 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -2104,8 +2104,8 @@ computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
     GetUnderlyingObjects(RHS, RHSUObjs, DL);
 
     // Is the set of underlying objects all noalias calls?
-    auto IsNAC = [](SmallVectorImpl<Value *> &Objects) {
-      return std::all_of(Objects.begin(), Objects.end(), isNoAliasCall);
+    auto IsNAC = [](ArrayRef<Value *> Objects) {
+      return all_of(Objects, isNoAliasCall);
     };
 
     // Is the set of underlying objects all things which must be disjoint from
@@ -2114,8 +2114,8 @@ computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
     // live with the compared-to allocation). For globals, we exclude symbols
     // that might be resolve lazily to symbols in another dynamically-loaded
     // library (and, thus, could be malloc'ed by the implementation).
-    auto IsAllocDisjoint = [](SmallVectorImpl<Value *> &Objects) {
-      return std::all_of(Objects.begin(), Objects.end(), [](Value *V) {
+    auto IsAllocDisjoint = [](ArrayRef<Value *> Objects) {
+      return all_of(Objects, [](Value *V) {
         if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
           return AI->getParent() && AI->getFunction() && AI->isStaticAlloca();
         if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
diff --git a/llvm/lib/Analysis/LazyCallGraph.cpp b/llvm/lib/Analysis/LazyCallGraph.cpp
index acff852..404b831 100644
--- a/llvm/lib/Analysis/LazyCallGraph.cpp
+++ b/llvm/lib/Analysis/LazyCallGraph.cpp
@@ -1207,8 +1207,7 @@ LazyCallGraph::RefSCC::removeInternalRefEdge(Node &SourceN, Node &TargetN) {
   if (!Result.empty())
     assert(!IsLeaf && "This SCC cannot be a leaf as we have split out new "
                       "SCCs by removing this edge.");
-  if (!std::any_of(G->LeafRefSCCs.begin(), G->LeafRefSCCs.end(),
-                   [&](RefSCC *C) { return C == this; }))
+  if (none_of(G->LeafRefSCCs, [&](RefSCC *C) { return C == this; }))
     assert(!IsLeaf && "This SCC cannot be a leaf as it already had child "
                       "SCCs before we removed this edge.");
 #endif
diff --git a/llvm/lib/Analysis/LoopInfo.cpp b/llvm/lib/Analysis/LoopInfo.cpp
index 9a8789c..9d2e38e 100644
--- a/llvm/lib/Analysis/LoopInfo.cpp
+++ b/llvm/lib/Analysis/LoopInfo.cpp
@@ -177,9 +177,8 @@ bool Loop::isRecursivelyLCSSAForm(DominatorTree &DT) const {
   if (!isLCSSAForm(DT))
     return false;
 
-  return std::all_of(begin(), end(), [&](const Loop *L) {
-    return L->isRecursivelyLCSSAForm(DT);
-  });
+  return all_of(*this,
+                [&](const Loop *L) { return L->isRecursivelyLCSSAForm(DT); });
 }
 
 bool Loop::isLoopSimplifyForm() const {
@@ -366,8 +365,7 @@ Loop::getUniqueExitBlocks(SmallVectorImpl<BasicBlock *> &ExitBlocks) const {
       // In case of multiple edges from current block to exit block, collect
       // only one edge in ExitBlocks. Use switchExitBlocks to keep track of
       // duplicate edges.
-      if (std::find(SwitchExitBlocks.begin(), SwitchExitBlocks.end(), Successor)
-          == SwitchExitBlocks.end()) {
+      if (!is_contained(SwitchExitBlocks, Successor)) {
         SwitchExitBlocks.push_back(Successor);
         ExitBlocks.push_back(Successor);
       }
@@ -536,8 +534,7 @@ Loop *UnloopUpdater::getNearestLoop(BasicBlock *BB, Loop *BBLoop) {
       assert(Subloop && "subloop is not an ancestor of the original loop");
     }
     // Get the current nearest parent of the Subloop exits, initially Unloop.
-    NearLoop =
-        SubloopParents.insert(std::make_pair(Subloop, &Unloop)).first->second;
+    NearLoop = SubloopParents.insert({Subloop, &Unloop}).first->second;
   }
 
   succ_iterator I = succ_begin(BB), E = succ_end(BB);
diff --git a/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp b/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
index c9ac2bd..e457f1c 100644
--- a/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
+++ b/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
@@ -232,13 +232,13 @@ bool llvm::moduleCanBeRenamedForThinLTO(const Module &M) {
   SmallPtrSet<GlobalValue *, 8> Used;
   collectUsedGlobalVariables(M, Used, /*CompilerUsed*/ false);
   bool LocalIsUsed =
-      llvm::any_of(Used, [](GlobalValue *V) { return V->hasLocalLinkage(); });
+      any_of(Used, [](GlobalValue *V) { return V->hasLocalLinkage(); });
   if (!LocalIsUsed)
     return true;
 
   // Walk all the instructions in the module and find if one is inline ASM
-  auto HasInlineAsm = llvm::any_of(M, [](const Function &F) {
-    return llvm::any_of(instructions(F), [](const Instruction &I) {
+  auto HasInlineAsm = any_of(M, [](const Function &F) {
+    return any_of(instructions(F), [](const Instruction &I) {
       const CallInst *CallI = dyn_cast<CallInst>(&I);
       if (!CallI)
         return false;
diff --git a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
index 83c803d..8959645 100644
--- a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -549,9 +549,8 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
   while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
     if (!L->isLoopInvariant(V))
       break;
 
-    bool AnyIndexNotLoopInvariant =
-        std::any_of(GepIndices.begin(), GepIndices.end(),
-                    [L](Value *Op) { return !L->isLoopInvariant(Op); });
+    bool AnyIndexNotLoopInvariant = any_of(
+        GepIndices, [L](Value *Op) { return !L->isLoopInvariant(Op); });
 
     if (AnyIndexNotLoopInvariant)
       break;
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index c4403c6..ac7dabe 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -406,7 +406,7 @@ static bool isEphemeralValueOf(Instruction *I, const Value *E) {
   // The instruction defining an assumption's condition itself is always
   // considered ephemeral to that assumption (even if it has other
   // non-ephemeral users). See r246696's test case for an example.
-  if (std::find(I->op_begin(), I->op_end(), E) != I->op_end())
+  if (is_contained(I->operands(), E))
     return true;
 
   while (!WorkSet.empty()) {
@@ -415,8 +415,7 @@ static bool isEphemeralValueOf(Instruction *I, const Value *E) {
       continue;
 
     // If all uses of this value are ephemeral, then so is this value.
-    if (std::all_of(V->user_begin(), V->user_end(),
-                    [&](const User *U) { return EphValues.count(U); })) {
+    if (all_of(V->users(), [&](const User *U) { return EphValues.count(U); })) {
       if (V == E)
         return true;
 
diff --git a/llvm/lib/AsmParser/LLLexer.cpp b/llvm/lib/AsmParser/LLLexer.cpp
index c118ecf..9cd21e7 100644
--- a/llvm/lib/AsmParser/LLLexer.cpp
+++ b/llvm/lib/AsmParser/LLLexer.cpp
@@ -817,7 +817,7 @@ lltok::Kind LLLexer::LexIdentifier() {
     int len = CurPtr-TokStart-3;
     uint32_t bits = len * 4;
     StringRef HexStr(TokStart + 3, len);
-    if (!std::all_of(HexStr.begin(), HexStr.end(), isxdigit)) {
+    if (!all_of(HexStr, isxdigit)) {
       // Bad token, return it as an error.
       CurPtr = TokStart+3;
       return lltok::Error;
diff --git a/llvm/lib/CodeGen/AsmPrinter/DebugLocEntry.h b/llvm/lib/CodeGen/AsmPrinter/DebugLocEntry.h
index 20acd45..dd12c32 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DebugLocEntry.h
+++ b/llvm/lib/CodeGen/AsmPrinter/DebugLocEntry.h
@@ -128,7 +128,7 @@ public:
   void addValues(ArrayRef<Value> Vals) {
     Values.append(Vals.begin(), Vals.end());
     sortUniqueValues();
-    assert(std::all_of(Values.begin(), Values.end(), [](DebugLocEntry::Value V){
+    assert(all_of(Values, [](DebugLocEntry::Value V) {
           return V.isBitPiece();
         }) && "value must be a piece");
   }
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index 41d8605..f3c84a5 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -449,8 +449,8 @@ DwarfDebug::constructDwarfCompileUnit(const DICompileUnit *DIUnit) {
                     DIUnit->getSplitDebugFilename());
   }
 
-  CUMap.insert(std::make_pair(DIUnit, &NewCU));
-  CUDieMap.insert(std::make_pair(&Die, &NewCU));
+  CUMap.insert({DIUnit, &NewCU});
+  CUDieMap.insert({&Die, &NewCU});
   return NewCU;
 }
 
@@ -844,8 +844,7 @@ DwarfDebug::buildLocationList(SmallVectorImpl<DebugLocEntry> &DebugLoc,
 
     // If this piece overlaps with any open ranges, truncate them.
     const DIExpression *DIExpr = Begin->getDebugExpression();
-    auto Last = std::remove_if(OpenRanges.begin(), OpenRanges.end(),
-                               [&](DebugLocEntry::Value R) {
+    auto Last = remove_if(OpenRanges, [&](DebugLocEntry::Value R) {
       return piecesOverlap(DIExpr, R.getExpression());
     });
     OpenRanges.erase(Last, OpenRanges.end());
@@ -1437,7 +1436,7 @@ void DebugLocEntry::finalize(const AsmPrinter &AP,
   const DebugLocEntry::Value &Value = Values[0];
   if (Value.isBitPiece()) {
     // Emit all pieces that belong to the same variable and range.
-    assert(std::all_of(Values.begin(), Values.end(), [](DebugLocEntry::Value P) {
+    assert(all_of(Values, [](DebugLocEntry::Value P) {
           return P.isBitPiece();
         }) && "all values are expected to be pieces");
     assert(std::is_sorted(Values.begin(), Values.end()) &&
@@ -1889,8 +1888,7 @@ void DwarfDebug::addDwarfTypeUnitType(DwarfCompileUnit &CU,
                                       getDwoLineTable(CU));
   DwarfTypeUnit &NewTU = *OwnedUnit;
   DIE &UnitDie = NewTU.getUnitDie();
-  TypeUnitsUnderConstruction.push_back(
-      std::make_pair(std::move(OwnedUnit), CTy));
+  TypeUnitsUnderConstruction.emplace_back(std::move(OwnedUnit), CTy);
 
   NewTU.addUInt(UnitDie, dwarf::DW_AT_language, dwarf::DW_FORM_data2,
                 CU.getLanguage());
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
index 6b06757..7d36fe9 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
@@ -134,7 +134,7 @@ public:
     Expr.append(V.Expr.begin(), V.Expr.end());
     FrameIndex.append(V.FrameIndex.begin(), V.FrameIndex.end());
-    assert(std::all_of(Expr.begin(), Expr.end(), [](const DIExpression *E) {
+    assert(all_of(Expr, [](const DIExpression *E) {
              return E && E->isBitPiece();
            }) && "conflicting locations for variable");
   }
diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp
index 8afa9ae..8482307 100644
--- a/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/llvm/lib/CodeGen/MachineInstr.cpp
@@ -2181,8 +2181,8 @@ void MachineInstr::setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
     unsigned Reg = MO.getReg();
     if (!TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
     // If there are no uses, including partial uses, the def is dead.
-    if (std::none_of(UsedRegs.begin(), UsedRegs.end(),
-                     [&](unsigned Use) { return TRI.regsOverlap(Use, Reg); }))
+    if (none_of(UsedRegs,
+                [&](unsigned Use) { return TRI.regsOverlap(Use, Reg); }))
       MO.setIsDead();
   }
 
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 7fc7199..8870bcc 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -11611,10 +11611,9 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
 
     // Check if this store interferes with any of the loads that we found.
    // If we find a load that alias with this store. Stop the sequence.
-    if (std::any_of(AliasLoadNodes.begin(), AliasLoadNodes.end(),
-                    [&](LSBaseSDNode* Ldn) {
-                      return isAlias(Ldn, StoreNodes[i].MemNode);
-                    }))
+    if (any_of(AliasLoadNodes, [&](LSBaseSDNode *Ldn) {
+          return isAlias(Ldn, StoreNodes[i].MemNode);
+        }))
       break;
 
     // Mark this node as useful.
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 17b266b..9c06a61 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -3394,8 +3394,8 @@ SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
   // All operands must be vector types with the same number of elements as
   // the result type and must be either UNDEF or a build vector of constant
   // or UNDEF scalars.
-  if (!std::all_of(Ops.begin(), Ops.end(), IsConstantBuildVectorOrUndef) ||
-      !std::all_of(Ops.begin(), Ops.end(), IsScalarOrSameVectorSize))
+  if (!all_of(Ops, IsConstantBuildVectorOrUndef) ||
+      !all_of(Ops, IsScalarOrSameVectorSize))
     return SDValue();
 
   // If we are comparing vectors, then the result needs to be a i1 boolean
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 836da78..8374e2e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -2730,7 +2730,7 @@ void SelectionDAGBuilder::visitFCmp(const User &I) {
 // Check if the condition of the select has one use or two users that are both
 // selects with the same condition.
 static bool hasOnlySelectUsers(const Value *Cond) {
-  return std::all_of(Cond->user_begin(), Cond->user_end(), [](const Value *V) {
+  return all_of(Cond->users(), [](const Value *V) {
     return isa<SelectInst>(V);
   });
 }
diff --git a/llvm/lib/IR/Attributes.cpp b/llvm/lib/IR/Attributes.cpp
index 4dfeacf..93b3d28 100644
--- a/llvm/lib/IR/Attributes.cpp
+++ b/llvm/lib/IR/Attributes.cpp
@@ -721,10 +721,11 @@ AttributeSet AttributeSet::get(LLVMContext &C,
                            const std::pair<unsigned, Attribute> &RHS) {
                           return LHS.first < RHS.first;
                         }) && "Misordered Attributes list!");
-  assert(std::none_of(Attrs.begin(), Attrs.end(),
-                      [](const std::pair<unsigned, Attribute> &Pair) {
-                        return Pair.second.hasAttribute(Attribute::None);
-                      }) && "Pointless attribute!");
+  assert(none_of(Attrs,
+                 [](const std::pair<unsigned, Attribute> &Pair) {
+                   return Pair.second.hasAttribute(Attribute::None);
+                 }) &&
+         "Pointless attribute!");
 
   // Create a vector if (unsigned, AttributeSetNode*) pairs from the attributes
   // list.
@@ -738,8 +739,7 @@ AttributeSet AttributeSet::get(LLVMContext &C,
       ++I;
     }
 
-    AttrPairVec.push_back(std::make_pair(Index,
-                                         AttributeSetNode::get(C, AttrVec)));
+    AttrPairVec.emplace_back(Index, AttributeSetNode::get(C, AttrVec));
   }
 
   return getImpl(C, AttrPairVec);
@@ -791,13 +791,12 @@ AttributeSet AttributeSet::get(LLVMContext &C, unsigned Index,
     default:
       Attr = Attribute::get(C, Kind);
     }
-    Attrs.push_back(std::make_pair(Index, Attr));
+    Attrs.emplace_back(Index, Attr);
   }
 
   // Add target-dependent (string) attributes.
   for (const auto &TDA : B.td_attrs())
-    Attrs.push_back(
-        std::make_pair(Index, Attribute::get(C, TDA.first, TDA.second)));
+    Attrs.emplace_back(Index, Attribute::get(C, TDA.first, TDA.second));
 
   return get(C, Attrs);
 }
@@ -806,7 +805,7 @@ AttributeSet AttributeSet::get(LLVMContext &C, unsigned Index,
                                ArrayRef<Attribute::AttrKind> Kinds) {
   SmallVector<std::pair<unsigned, Attribute>, 8> Attrs;
   for (Attribute::AttrKind K : Kinds)
-    Attrs.push_back(std::make_pair(Index, Attribute::get(C, K)));
+    Attrs.emplace_back(Index, Attribute::get(C, K));
   return get(C, Attrs);
 }
 
@@ -814,7 +813,7 @@ AttributeSet AttributeSet::get(LLVMContext &C, unsigned Index,
                                ArrayRef<StringRef> Kinds) {
   SmallVector<std::pair<unsigned, Attribute>, 8> Attrs;
   for (StringRef K : Kinds)
-    Attrs.push_back(std::make_pair(Index, Attribute::get(C, K)));
+    Attrs.emplace_back(Index, Attribute::get(C, K));
   return get(C, Attrs);
 }
 
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 938b2b2..883cf29 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -1557,7 +1557,7 @@ MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) {
   if (!T)
     return &N;
 
-  if (!llvm::any_of(T->operands(), isOldLoopArgument))
+  if (none_of(T->operands(), isOldLoopArgument))
     return &N;
 
   SmallVector<Metadata *, 8> Ops;
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 761d3f0..4eb33ad 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -4304,8 +4304,8 @@ void Verifier::verifyCompileUnits() {
   if (CUs)
     Listed.insert(CUs->op_begin(), CUs->op_end());
   Assert(
-      std::all_of(CUVisited.begin(), CUVisited.end(),
-                  [&Listed](const Metadata *CU) { return Listed.count(CU); }),
+      all_of(CUVisited,
+             [&Listed](const Metadata *CU) { return Listed.count(CU); }),
       "All DICompileUnits must be listed in llvm.dbg.cu");
   CUVisited.clear();
 }
diff --git a/llvm/lib/ProfileData/InstrProfWriter.cpp b/llvm/lib/ProfileData/InstrProfWriter.cpp
index cb839f9..029d756 100644
--- a/llvm/lib/ProfileData/InstrProfWriter.cpp
+++ b/llvm/lib/ProfileData/InstrProfWriter.cpp
@@ -208,8 +208,7 @@ bool InstrProfWriter::shouldEncodeData(const ProfilingData &PD) {
     return true;
   for (const auto &Func : PD) {
     const InstrProfRecord &IPR = Func.second;
-    if (std::any_of(IPR.Counts.begin(), IPR.Counts.end(),
-                    [](uint64_t Count) { return Count > 0; }))
+    if (any_of(IPR.Counts, [](uint64_t Count) { return Count > 0; }))
       return true;
   }
   return false;
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index e505a75..d741525 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -6328,7 +6328,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
     EVT SubVT = SubV1.getValueType();
 
     // We expect these to have been canonicalized to -1.
-    assert(std::all_of(ShuffleMask.begin(), ShuffleMask.end(), [&](int i) {
+    assert(all_of(ShuffleMask, [&](int i) {
              return i < (int)VT.getVectorNumElements();
            }) && "Unexpected shuffle index into UNDEF operand!");
 
diff --git a/llvm/lib/Target/Hexagon/HexagonRDFOpt.cpp b/llvm/lib/Target/Hexagon/HexagonRDFOpt.cpp
index 5f008f5..e673892 100644
--- a/llvm/lib/Target/Hexagon/HexagonRDFOpt.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonRDFOpt.cpp
@@ -245,7 +245,7 @@ bool HexagonDCE::rewrite(NodeAddr<InstrNode*> IA, SetVector<NodeId> &Remove) {
     if (&DA.Addr->getOp() != &Op)
       continue;
     Defs = DFG.getRelatedRefs(IA, DA);
-    if (!std::all_of(Defs.begin(), Defs.end(), IsDead))
+    if (!all_of(Defs, IsDead))
       return false;
     break;
   }
diff --git a/llvm/lib/Target/Hexagon/RDFGraph.cpp b/llvm/lib/Target/Hexagon/RDFGraph.cpp
index 5070d9c..156bbda 100644
--- a/llvm/lib/Target/Hexagon/RDFGraph.cpp
+++ b/llvm/lib/Target/Hexagon/RDFGraph.cpp
@@ -1509,7 +1509,7 @@ void DataFlowGraph::linkRefUp(NodeAddr<InstrNode*> IA, NodeAddr<T> TA,
     bool PrecUp = RAI.covers(QR, RR);
     // Skip all defs that are aliased to any of the defs that we have already
     // seen. If we encounter a covering def, stop the stack traversal early.
-    if (std::any_of(Defs.begin(), Defs.end(), AliasQR)) {
+    if (any_of(Defs, AliasQR)) {
       if (PrecUp)
         break;
       continue;
diff --git a/llvm/lib/Target/Hexagon/RDFLiveness.cpp b/llvm/lib/Target/Hexagon/RDFLiveness.cpp
index 641f014..e25596b 100644
--- a/llvm/lib/Target/Hexagon/RDFLiveness.cpp
+++ b/llvm/lib/Target/Hexagon/RDFLiveness.cpp
@@ -400,7 +400,7 @@ void Liveness::computePhiInfo() {
       for (auto I = Uses.begin(), E = Uses.end(); I != E; ) {
         auto UA = DFG.addr<UseNode*>(*I);
         NodeList RDs = getAllReachingDefs(UI->first, UA);
-        if (std::any_of(RDs.begin(), RDs.end(), HasDef))
+        if (any_of(RDs, HasDef))
           ++I;
         else
           I = Uses.erase(I);
diff --git a/llvm/lib/Target/PowerPC/PPCBoolRetToInt.cpp b/llvm/lib/Target/PowerPC/PPCBoolRetToInt.cpp
index 56fecb4..b0c7bed 100644
--- a/llvm/lib/Target/PowerPC/PPCBoolRetToInt.cpp
+++ b/llvm/lib/Target/PowerPC/PPCBoolRetToInt.cpp
@@ -134,8 +134,7 @@ class PPCBoolRetToInt : public FunctionPass {
     };
     const auto &Users = P->users();
     const auto &Operands = P->operands();
-    if (!std::all_of(Users.begin(), Users.end(), IsValidUser) ||
-        !std::all_of(Operands.begin(), Operands.end(), IsValidOperand))
+    if (!all_of(Users, IsValidUser) || !all_of(Operands, IsValidOperand))
       ToRemove.push_back(P);
   }
 
@@ -153,8 +152,7 @@ class PPCBoolRetToInt : public FunctionPass {
       // Condition 4 and 5
       const auto &Users = P->users();
       const auto &Operands = P->operands();
-      if (!std::all_of(Users.begin(), Users.end(), IsPromotable) ||
-          !std::all_of(Operands.begin(), Operands.end(), IsPromotable))
+      if (!all_of(Users, IsPromotable) || !all_of(Operands, IsPromotable))
         ToRemove.push_back(P);
     }
   }
@@ -199,7 +197,7 @@ class PPCBoolRetToInt : public FunctionPass {
     auto Defs = findAllDefs(U);
 
     // If the values are all Constants or Arguments, don't bother
-    if (!std::any_of(Defs.begin(), Defs.end(), isa<Instruction, Value *>))
+    if (none_of(Defs, isa<Instruction, Value *>))
       return false;
 
     // Presently, we only know how to handle PHINode, Constant, Arguments and
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 2ccbcd9..2e1df66 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -4060,8 +4060,7 @@ PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
     return false;
 
   // Functions containing by val parameters are not supported.
-  if (std::any_of(Ins.begin(), Ins.end(),
-                  [](const ISD::InputArg& IA) { return IA.Flags.isByVal(); }))
+  if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
     return false;
 
   // No TCO/SCO on indirect call because Caller have to restore its TOC
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 771dfdb..2e66121 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -4821,7 +4821,7 @@ static bool getTargetShuffleMaskIndices(SDValue MaskNode,
 
   // We can always decode if the buildvector is all zero constants,
   // but can't use isBuildVectorAllZeros as it might contain UNDEFs.
-  if (llvm::all_of(MaskNode->ops(), X86::isZeroNode)) {
+  if (all_of(MaskNode->ops(), X86::isZeroNode)) {
     RawMask.append(VT.getSizeInBits() / MaskEltSizeInBits, 0);
     return true;
   }
@@ -5087,7 +5087,7 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
 
   // Check if we're getting a shuffle mask with zero'd elements.
   if (!AllowSentinelZero)
-    if (llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
+    if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
       return false;
 
   // If we have a fake unary shuffle, the shuffle mask is spread across two
@@ -5197,11 +5197,10 @@ static bool resolveTargetShuffleInputs(SDValue Op, SDValue &Op0, SDValue &Op1,
     return false;
 
   int NumElts = Mask.size();
-  bool Op0InUse = std::any_of(Mask.begin(), Mask.end(), [NumElts](int Idx) {
+  bool Op0InUse = any_of(Mask, [NumElts](int Idx) {
     return 0 <= Idx && Idx < NumElts;
   });
-  bool Op1InUse = std::any_of(Mask.begin(), Mask.end(),
-                              [NumElts](int Idx) { return NumElts <= Idx; });
+  bool Op1InUse = any_of(Mask, [NumElts](int Idx) { return NumElts <= Idx; });
 
   Op0 = Op0InUse ? Ops[0] : SDValue();
   Op1 = Op1InUse ? Ops[1] : SDValue();
@@ -10352,8 +10351,8 @@ static SDValue lowerV16I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
   // with a pack.
   SDValue V = V1;
 
-  int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
-  int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
+  std::array<int, 8> LoBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
+  std::array<int, 8> HiBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
   for (int i = 0; i < 16; ++i)
     if (Mask[i] >= 0)
       (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
@@ -10364,10 +10363,8 @@ static SDValue lowerV16I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
   // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
   // them out and avoid using UNPCK{L,H} to extract the elements of V as
   // i16s.
-  if (std::none_of(std::begin(LoBlendMask), std::end(LoBlendMask),
-                   [](int M) { return M >= 0 && M % 2 == 1; }) &&
-      std::none_of(std::begin(HiBlendMask), std::end(HiBlendMask),
-                   [](int M) { return M >= 0 && M % 2 == 1; })) {
+  if (none_of(LoBlendMask, [](int M) { return M >= 0 && M % 2 == 1; }) &&
+      none_of(HiBlendMask, [](int M) { return M >= 0 && M % 2 == 1; })) {
     // Use a mask to drop the high bytes.
     VLoHalf = DAG.getBitcast(MVT::v8i16, V);
     VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
@@ -14338,9 +14335,8 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
             Opcode == X86ISD::CMPP);
   };
 
-  if (IsPackableComparison(In) ||
-      (In.getOpcode() == ISD::CONCAT_VECTORS &&
-       std::all_of(In->op_begin(), In->op_end(), IsPackableComparison))) {
+  if (IsPackableComparison(In) || (In.getOpcode() == ISD::CONCAT_VECTORS &&
+                                   all_of(In->ops(), IsPackableComparison))) {
     if (SDValue V = truncateVectorCompareWithPACKSS(VT, In, DL, DAG, Subtarget))
       return V;
   }
@@ -25282,7 +25278,7 @@ static bool combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
       return false;
 
   bool MaskContainsZeros =
-      llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
+      any_of(Mask, [](int M) { return M == SM_SentinelZero; });
 
   // If we have a single input shuffle with different shuffle patterns in the
   // the 128-bit lanes use the variable mask to VPERMILPS.
@@ -25578,11 +25574,11 @@ static bool combineX86ShufflesRecursively(ArrayRef<SDValue> SrcOps,
   }
 
   // Handle the all undef/zero cases early.
-  if (llvm::all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; })) {
+  if (all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; })) {
     DCI.CombineTo(Root.getNode(), DAG.getUNDEF(Root.getValueType()));
     return true;
   }
-  if (llvm::all_of(Mask, [](int Idx) { return Idx < 0; })) {
+  if (all_of(Mask, [](int Idx) { return Idx < 0; })) {
     // TODO - should we handle the mixed zero/undef case as well? Just returning
     // a zero mask will lose information on undef elements possibly reducing
     // future combine possibilities.
@@ -25596,8 +25592,7 @@ static bool combineX86ShufflesRecursively(ArrayRef<SDValue> SrcOps,
   for (int i = 0, e = Ops.size(); i < e; ++i) {
     int lo = UsedOps.size() * MaskWidth;
     int hi = lo + MaskWidth;
-    if (std::any_of(Mask.begin(), Mask.end(),
-                    [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
+    if (any_of(Mask, [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
       UsedOps.push_back(Ops[i]);
       continue;
     }
@@ -30124,7 +30119,7 @@ static SDValue combineVectorCompareTruncation(SDNode *N, SDLoc &DL,
   MVT InSVT = InVT.getScalarType();
 
   assert(DAG.getTargetLoweringInfo().getBooleanContents(InVT) ==
-             llvm::TargetLoweringBase::ZeroOrNegativeOneBooleanContent &&
+             TargetLoweringBase::ZeroOrNegativeOneBooleanContent &&
          "Expected comparison result to be zero/all bits");
 
   // Check we have a truncation suited for PACKSS.
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index b68f8e9..52c1cc5 100644
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -1737,7 +1737,7 @@ static bool isPointerValueDeadOnEntryToFunction(
   for (auto *L : Loads) {
     auto *LTy = L->getType();
-    if (!std::any_of(Stores.begin(), Stores.end(), [&](StoreInst *S) {
+    if (none_of(Stores, [&](const StoreInst *S) {
           auto *STy = S->getValueOperand()->getType();
           // The load is only dominated by the store if DomTree says so
           // and the number of bits loaded in L is less than or equal to
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 1914576..1603278 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -59,14 +59,14 @@ isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
   // eliminate the markers.
   SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
-  ValuesToInspect.push_back(std::make_pair(V, false));
+  ValuesToInspect.emplace_back(V, false);
   while (!ValuesToInspect.empty()) {
     auto ValuePair = ValuesToInspect.pop_back_val();
     const bool IsOffset = ValuePair.second;
     for (auto &U : ValuePair.first->uses()) {
-      Instruction *I = cast<Instruction>(U.getUser());
+      auto *I = cast<Instruction>(U.getUser());
 
-      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+      if (auto *LI = dyn_cast<LoadInst>(I)) {
         // Ignore non-volatile loads, they are always ok.
         if (!LI->isSimple()) return false;
         continue;
@@ -74,14 +74,13 @@ isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
 
       if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
         // If uses of the bitcast are ok, we are ok.
-        ValuesToInspect.push_back(std::make_pair(I, IsOffset));
+        ValuesToInspect.emplace_back(I, IsOffset);
         continue;
       }
-      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
+      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
         // If the GEP has all zero indices, it doesn't offset the pointer. If it
         // doesn't, it does.
-        ValuesToInspect.push_back(
-            std::make_pair(I, IsOffset || !GEP->hasAllZeroIndices()));
+        ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
         continue;
       }
 
@@ -477,7 +476,7 @@ static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
       DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
       DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty) &&
       !DL.isNonIntegralPointerType(Ty)) {
-    if (std::all_of(LI.user_begin(), LI.user_end(), [&LI](User *U) {
+    if (all_of(LI.users(), [&LI](User *U) {
           auto *SI = dyn_cast<StoreInst>(U);
           return SI && SI->getPointerOperand() != &LI;
         })) {
diff --git a/llvm/lib/Transforms/Scalar/GVNHoist.cpp b/llvm/lib/Transforms/Scalar/GVNHoist.cpp
index 0d7364b..2ae4262 100644
--- a/llvm/lib/Transforms/Scalar/GVNHoist.cpp
+++ b/llvm/lib/Transforms/Scalar/GVNHoist.cpp
@@ -843,8 +843,7 @@ private:
 
     for (auto *Phi : UsePhis) {
       auto In = Phi->incoming_values();
-      if (std::all_of(In.begin(), In.end(),
-                      [&](Use &U){return U == NewMemAcc;})) {
+      if (all_of(In, [&](Use &U) { return U == NewMemAcc; })) {
         Phi->replaceAllUsesWith(NewMemAcc);
         MSSA->removeMemoryAccess(Phi);
       }
diff --git a/llvm/lib/Transforms/Scalar/LoopInterchange.cpp b/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
index 70d014f..8bd0a96 100644
--- a/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
@@ -599,8 +599,8 @@ struct LoopInterchange : public FunctionPass {
 } // end of namespace
 
 bool LoopInterchangeLegality::areAllUsesReductions(Instruction *Ins, Loop *L) {
-  return !std::any_of(Ins->user_begin(), Ins->user_end(), [=](User *U) -> bool {
-    PHINode *UserIns = dyn_cast<PHINode>(U);
+  return none_of(Ins->users(), [=](User *U) -> bool {
+    auto *UserIns = dyn_cast<PHINode>(U);
     RecurrenceDescriptor RD;
     return !UserIns || !RecurrenceDescriptor::isReductionPHI(UserIns, L, RD);
   });
diff --git a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
index f29228c..9e3d7e1 100644
--- a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
@@ -113,10 +113,9 @@ bool doesStoreDominatesAllLatches(BasicBlock *StoreBlock, Loop *L,
                                   DominatorTree *DT) {
   SmallVector<BasicBlock *, 8> Latches;
   L->getLoopLatches(Latches);
-  return std::all_of(Latches.begin(), Latches.end(),
-                     [&](const BasicBlock *Latch) {
-                       return DT->dominates(StoreBlock, Latch);
-                     });
+  return all_of(Latches, [&](const BasicBlock *Latch) {
+    return DT->dominates(StoreBlock, Latch);
+  });
 }
 
 /// \brief Return true if the load is not executed on all paths in the
 /// loop.
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index d64c658..e77a893 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -521,16 +521,14 @@ static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P) {
       if (Args.erase(C))
         NeedLift = true;
       else if (MayAlias) {
-        NeedLift = std::any_of(MemLocs.begin(), MemLocs.end(),
-                               [C, &AA](const MemoryLocation &ML) {
-                                 return AA.getModRefInfo(C, ML);
-                               });
+        NeedLift = any_of(MemLocs, [C, &AA](const MemoryLocation &ML) {
+          return AA.getModRefInfo(C, ML);
+        });
 
         if (!NeedLift)
-          NeedLift = std::any_of(CallSites.begin(), CallSites.end(),
-                                 [C, &AA](const ImmutableCallSite &CS) {
-                                   return AA.getModRefInfo(C, CS);
-                                 });
+          NeedLift = any_of(CallSites, [C, &AA](const ImmutableCallSite &CS) {
+            return AA.getModRefInfo(C, CS);
+          });
       }
 
       if (!NeedLift)
diff --git a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
index bab39a3..ea40022 100644
--- a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -2268,8 +2268,7 @@ static bool shouldRewriteStatepointsIn(Function &F) {
 void RewriteStatepointsForGC::stripNonValidAttributes(Module &M) {
 #ifndef NDEBUG
-  assert(std::any_of(M.begin(), M.end(), shouldRewriteStatepointsIn) &&
-         "precondition!");
+  assert(any_of(M, shouldRewriteStatepointsIn) && "precondition!");
 #endif
 
   for (Function &F : M)
diff --git a/llvm/lib/Transforms/Scalar/SCCP.cpp b/llvm/lib/Transforms/Scalar/SCCP.cpp
index d068b15..9025a04 100644
--- a/llvm/lib/Transforms/Scalar/SCCP.cpp
+++ b/llvm/lib/Transforms/Scalar/SCCP.cpp
@@ -1534,8 +1534,7 @@ static bool tryToReplaceWithConstant(SCCPSolver &Solver, Value *V) {
   Constant *Const = nullptr;
   if (V->getType()->isStructTy()) {
     std::vector<LatticeVal> IVs = Solver.getStructLatticeValueFor(V);
-    if (std::any_of(IVs.begin(), IVs.end(),
-                    [](LatticeVal &LV) { return LV.isOverdefined(); }))
+    if (any_of(IVs, [](const LatticeVal &LV) { return LV.isOverdefined(); }))
       return false;
     std::vector<Constant *> ConstVals;
     StructType *ST = dyn_cast<StructType>(V->getType());
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 9a29ead..407dabb 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -432,19 +432,18 @@ class AllocaSlices::partition_iterator
       // cannot change the max split slice end because we just checked that
      // the prior partition ended prior to that max.
       P.SplitTails.erase(
-          std::remove_if(
-              P.SplitTails.begin(), P.SplitTails.end(),
-              [&](Slice *S) { return S->endOffset() <= P.EndOffset; }),
+          remove_if(P.SplitTails,
+                    [&](Slice *S) { return S->endOffset() <= P.EndOffset; }),
           P.SplitTails.end());
-      assert(std::any_of(P.SplitTails.begin(), P.SplitTails.end(),
-                         [&](Slice *S) {
-                           return S->endOffset() == MaxSplitSliceEndOffset;
-                         }) &&
+      assert(any_of(P.SplitTails,
+                    [&](Slice *S) {
+                      return S->endOffset() == MaxSplitSliceEndOffset;
+                    }) &&
              "Could not find the current max split slice offset!");
-      assert(std::all_of(P.SplitTails.begin(), P.SplitTails.end(),
-                         [&](Slice *S) {
-                           return S->endOffset() <= MaxSplitSliceEndOffset;
-                         }) &&
+      assert(all_of(P.SplitTails,
+                    [&](Slice *S) {
+                      return S->endOffset() <= MaxSplitSliceEndOffset;
+                    }) &&
             "Max split slice end offset is not actually the max!");
     }
   }
diff --git a/llvm/lib/Transforms/Utils/LCSSA.cpp b/llvm/lib/Transforms/Utils/LCSSA.cpp
index d15c15a..eee4f7f 100644
--- a/llvm/lib/Transforms/Utils/LCSSA.cpp
+++ b/llvm/lib/Transforms/Utils/LCSSA.cpp
@@ -229,7 +229,7 @@ blockDominatesAnExit(BasicBlock *BB, DominatorTree &DT,
                      const SmallVectorImpl<BasicBlock *> &ExitBlocks) {
   DomTreeNode *DomNode = DT.getNode(BB);
-  return llvm::any_of(ExitBlocks, [&](BasicBlock * EB) {
+  return any_of(ExitBlocks, [&](BasicBlock *EB) {
     return DT.dominates(DomNode, DT.getNode(EB));
   });
 }
diff --git a/llvm/lib/Transforms/Utils/LoopUnroll.cpp b/llvm/lib/Transforms/Utils/LoopUnroll.cpp
index ce4ce93..df785bd 100644
--- a/llvm/lib/Transforms/Utils/LoopUnroll.cpp
+++ b/llvm/lib/Transforms/Utils/LoopUnroll.cpp
@@ -272,8 +272,9 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool Force,
   // now we just recompute LCSSA for the outer loop, but it should be possible
   // to fix it in-place.
   bool NeedToFixLCSSA = PreserveLCSSA && CompletelyUnroll &&
-      std::any_of(ExitBlocks.begin(), ExitBlocks.end(),
-                  [&](BasicBlock *BB) { return isa<PHINode>(BB->begin()); });
+                        any_of(ExitBlocks, [](const BasicBlock *BB) {
+                          return isa<PHINode>(BB->begin());
+                        });
 
   // We assume a run-time trip count if the compiler cannot
   // figure out the loop trip count and the unroll-runtime
diff --git a/llvm/lib/Transforms/Utils/LoopUtils.cpp b/llvm/lib/Transforms/Utils/LoopUtils.cpp
index 0380292..549fc8c 100644
--- a/llvm/lib/Transforms/Utils/LoopUtils.cpp
+++ b/llvm/lib/Transforms/Utils/LoopUtils.cpp
@@ -920,7 +920,7 @@ SmallVector<Instruction *, 8> llvm::findDefsUsedOutsideOfLoop(Loop *L) {
       // be adapted into a pointer.
       for (auto &Inst : *Block) {
         auto Users = Inst.users();
-        if (std::any_of(Users.begin(), Users.end(), [&](User *U) {
+        if (any_of(Users, [&](User *U) {
               auto *Use = cast<Instruction>(U);
               return !L->contains(Use->getParent());
             }))
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index f3bea40..eae2b33 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -1822,7 +1822,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout &DL) {
     return false;
 
   // Can't fold blocks that contain noduplicate or convergent calls.
-  if (llvm::any_of(*BB, [](const Instruction &I) {
+  if (any_of(*BB, [](const Instruction &I) {
        const CallInst *CI = dyn_cast<CallInst>(&I);
        return CI && (CI->cannotDuplicate() || CI->isConvergent());
      }))
diff --git a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
index c10a1ff..6eafc44 100644
--- a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -83,7 +83,7 @@ static bool isOnlyUsedInEqualityComparison(Value *V, Value *With) {
 }
 
 static bool callHasFloatingPointArgument(const CallInst *CI) {
-  return std::any_of(CI->op_begin(), CI->op_end(), [](const Use &OI) {
+  return any_of(CI->operands(), [](const Use &OI) {
     return OI->getType()->isFloatingPointTy();
   });
 }
diff --git a/llvm/lib/Transforms/Utils/ValueMapper.cpp b/llvm/lib/Transforms/Utils/ValueMapper.cpp
index 2eade8c..0170ab1 100644
--- a/llvm/lib/Transforms/Utils/ValueMapper.cpp
+++ b/llvm/lib/Transforms/Utils/ValueMapper.cpp
@@ -671,7 +671,7 @@ void MDNodeMapper::UniquedGraph::propagateChanges() {
         if (D.HasChanged)
           continue;
 
-        if (!llvm::any_of(N->operands(), [&](const Metadata *Op) {
+        if (none_of(N->operands(), [&](const Metadata *Op) {
              auto Where = Info.find(Op);
              return Where != Info.end() && Where->second.HasChanged;
            }))
diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index f067857..8c49490 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -520,7 +520,7 @@ Vectorizer::getVectorizablePrefix(ArrayRef<Instruction *> Chain) {
   unsigned ChainIdx, ChainLen;
   for (ChainIdx = 0, ChainLen = Chain.size(); ChainIdx < ChainLen; ++ChainIdx) {
     Instruction *I = Chain[ChainIdx];
-    if (!any_of(VectorizableChainInstrs,
+    if (none_of(VectorizableChainInstrs,
                 [I](std::pair CI) { return I == CI.first; }))
diff --git a/llvm/utils/TableGen/CodeGenDAGPatterns.cpp b/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
index 307a953..f306956 100644
--- a/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
+++ b/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
@@ -108,24 +108,24 @@ bool EEVT::TypeSet::FillWithPossibleTypes(TreePattern &TP,
 /// hasIntegerTypes - Return true if this TypeSet contains iAny or an
 /// integer value type.
 bool EEVT::TypeSet::hasIntegerTypes() const {
-  return std::any_of(TypeVec.begin(), TypeVec.end(), isInteger);
+  return any_of(TypeVec, isInteger);
 }
 
 /// hasFloatingPointTypes - Return true if this TypeSet contains an fAny or
 /// a floating point value type.
 bool EEVT::TypeSet::hasFloatingPointTypes() const {
-  return std::any_of(TypeVec.begin(), TypeVec.end(), isFloatingPoint);
+  return any_of(TypeVec, isFloatingPoint);
 }
 
 /// hasScalarTypes - Return true if this TypeSet contains a scalar value type.
bool EEVT::TypeSet::hasScalarTypes() const {
-  return std::any_of(TypeVec.begin(), TypeVec.end(), isScalar);
+  return any_of(TypeVec, isScalar);
 }
 
 /// hasVectorTypes - Return true if this TypeSet contains a vAny or a vector
 /// value type.
 bool EEVT::TypeSet::hasVectorTypes() const {
-  return std::any_of(TypeVec.begin(), TypeVec.end(), isVector);
+  return any_of(TypeVec, isVector);
 }
 
@@ -3602,10 +3602,9 @@ static void CombineChildVariants(TreePatternNode *Orig,
     //   (and GPRC:$a, GPRC:$b) -> (and GPRC:$b, GPRC:$a)
     // which are the same pattern.  Ignore the dups.
     if (R->canPatternMatch(ErrString, CDP) &&
-        std::none_of(OutVariants.begin(), OutVariants.end(),
-                     [&](TreePatternNode *Variant) {
-                       return R->isIsomorphicTo(Variant, DepVars);
-                     }))
+        none_of(OutVariants, [&](TreePatternNode *Variant) {
+          return R->isIsomorphicTo(Variant, DepVars);
+        }))
       OutVariants.push_back(R.release());
 
     // Increment indices to the next permutation by incrementing the
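
The pattern applied above is mechanical: every std::alg(C.begin(), C.end(), Pred)
call becomes alg(C, Pred) via the range wrappers in llvm/ADT/STLExtras.h
(llvm::all_of, any_of, none_of, remove_if, is_contained), and negated calls such
as !std::any_of(...) flip to none_of(...). What follows is a minimal standalone
sketch of that idiom, not LLVM's actual implementation: the real helpers live in
namespace llvm and dispatch through adl_begin/adl_end, so the simplified
signatures below are assumptions made for illustration only.

// range_helpers_sketch.cpp -- illustrative only; the real helpers are in
// llvm/include/llvm/ADT/STLExtras.h.
#include <algorithm>
#include <cassert>
#include <iterator>
#include <vector>

// Wrap the iterator-pair algorithms so callers name the container once.
template <typename R, typename UnaryPredicate>
bool all_of(R &&Range, UnaryPredicate P) {
  return std::all_of(std::begin(Range), std::end(Range), P);
}

template <typename R, typename UnaryPredicate>
bool any_of(R &&Range, UnaryPredicate P) {
  return std::any_of(std::begin(Range), std::end(Range), P);
}

template <typename R, typename UnaryPredicate>
bool none_of(R &&Range, UnaryPredicate P) {
  return std::none_of(std::begin(Range), std::end(Range), P);
}

// The "find != end()" test collapses into a single readable query.
template <typename R, typename E>
bool is_contained(R &&Range, const E &Element) {
  return std::find(std::begin(Range), std::end(Range), Element) !=
         std::end(Range);
}

int main() {
  std::vector<int> Mask = {0, 3, -1, 7};

  // Before this patch's idiom: unpack begin/end at every call site.
  bool HasSentinel =
      std::any_of(Mask.begin(), Mask.end(), [](int M) { return M < 0; });

  // After: the range helper unpacks begin/end once, in one place.
  assert(HasSentinel == any_of(Mask, [](int M) { return M < 0; }));
  assert(all_of(Mask, [](int M) { return M >= -1; }));
  assert(is_contained(Mask, 7));
  // !any_of(Range, P) and none_of(Range, P) are equivalent, which is why
  // several hunks above flip the call instead of keeping the negation.
  assert(!any_of(Mask, [](int M) { return M > 100; }) ==
         none_of(Mask, [](int M) { return M > 100; }));
  return 0;
}

This commit predates std::ranges (C++20); defining the wrappers in-tree gave
LLVM the same call-site ergonomics under C++11, which is why so many files
could be converted with no intended change in behavior.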