#define LLVM_ADT_POSTORDERITERATOR_H
#include "llvm/ADT/GraphTraits.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include <iterator>
+#include <optional>
#include <set>
#include <utility>
#include <vector>
public:
// Return true if edge destination should be visited.
template <typename NodeRef>
- bool insertEdge(Optional<NodeRef> From, NodeRef To) {
+ bool insertEdge(std::optional<NodeRef> From, NodeRef To) {
return Visited.insert(To).second;
}
// Return true if edge destination should be visited, called with From = 0 for
// the root node.
// Graph edges can be pruned by specializing this function.
- template <class NodeRef> bool insertEdge(Optional<NodeRef> From, NodeRef To) {
+ template <class NodeRef>
+ bool insertEdge(std::optional<NodeRef> From, NodeRef To) {
return Visited.insert(To).second;
}
SmallVector<std::pair<NodeRef, ChildItTy>, 8> VisitStack;
po_iterator(NodeRef BB) {
- this->insertEdge(Optional<NodeRef>(), BB);
+ this->insertEdge(std::optional<NodeRef>(), BB);
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
traverseChild();
}
po_iterator(NodeRef BB, SetType &S)
: po_iterator_storage<SetType, ExtStorage>(S) {
- if (this->insertEdge(Optional<NodeRef>(), BB)) {
+ if (this->insertEdge(std::optional<NodeRef>(), BB)) {
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
traverseChild();
}
void traverseChild() {
while (VisitStack.back().second != GT::child_end(VisitStack.back().first)) {
NodeRef BB = *VisitStack.back().second++;
- if (this->insertEdge(Optional<NodeRef>(VisitStack.back().first), BB)) {
+ if (this->insertEdge(std::optional<NodeRef>(VisitStack.back().first),
+ BB)) {
// If the block is not visited...
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
}
#define LLVM_ANALYSIS_ALIASANALYSIS_H
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/MemoryLocation.h"
#include <cstdint>
#include <functional>
#include <memory>
+#include <optional>
#include <vector>
namespace llvm {
/// call-site mod-ref behavior queries. Otherwise it delegates to the specific
/// helpers above.
ModRefInfo getModRefInfo(const Instruction *I,
- const Optional<MemoryLocation> &OptLoc) {
+ const std::optional<MemoryLocation> &OptLoc) {
SimpleAAQueryInfo AAQIP(*this);
return getModRefInfo(I, OptLoc, AAQIP);
}
ModRefInfo getModRefInfo(const CatchReturnInst *I, const MemoryLocation &Loc,
AAQueryInfo &AAQI);
ModRefInfo getModRefInfo(const Instruction *I,
- const Optional<MemoryLocation> &OptLoc,
+ const std::optional<MemoryLocation> &OptLoc,
AAQueryInfo &AAQIP);
ModRefInfo callCapturesBefore(const Instruction *I,
const MemoryLocation &MemLoc, DominatorTree *DT,
return AA.getModRefInfoMask(Loc, AAQI, IgnoreLocals);
}
ModRefInfo getModRefInfo(const Instruction *I,
- const Optional<MemoryLocation> &OptLoc) {
+ const std::optional<MemoryLocation> &OptLoc) {
return AA.getModRefInfo(I, OptLoc, AAQI);
}
ModRefInfo getModRefInfo(const Instruction *I, const CallBase *Call2) {
/// Returns estimated weight for \p BB. std::nullopt if \p BB has no estimated
/// weight.
- Optional<uint32_t> getEstimatedBlockWeight(const BasicBlock *BB) const;
+ std::optional<uint32_t> getEstimatedBlockWeight(const BasicBlock *BB) const;
/// Returns estimated weight to enter \p L. In other words it is weight of
/// loop's header block not scaled by trip count. Returns std::nullopt if \p L
/// has no estimated weight.
- Optional<uint32_t> getEstimatedLoopWeight(const LoopData &L) const;
+ std::optional<uint32_t> getEstimatedLoopWeight(const LoopData &L) const;
/// Return estimated weight for \p Edge. Returns std::nullopt if estimated
/// weight is unknown.
- Optional<uint32_t> getEstimatedEdgeWeight(const LoopEdge &Edge) const;
+ std::optional<uint32_t> getEstimatedEdgeWeight(const LoopEdge &Edge) const;
/// Iterates over all edges leading from \p SrcBB to \p Successors and
/// returns maximum of all estimated weights. If at least one edge has unknown
/// estimated weight std::nullopt is returned.
template <class IterT>
- Optional<uint32_t>
+ std::optional<uint32_t>
getMaxEstimatedEdgeWeight(const LoopBlock &SrcBB,
iterator_range<IterT> Successors) const;
SmallVectorImpl<LoopBlock> &LoopWorkList);
/// Returns block's weight encoded in the IR.
- Optional<uint32_t> getInitialEstimatedBlockWeight(const BasicBlock *BB);
+ std::optional<uint32_t> getInitialEstimatedBlockWeight(const BasicBlock *BB);
// Computes estimated weights for all blocks in \p F.
void computeEestimateBlockWeight(const Function &F, DominatorTree *DT,
/// first field and it is not supposed to be `nullptr`.
/// Reference edges, for example, are used for connecting broker function
/// caller to the callback function for callback call sites.
- using CallRecord = std::pair<Optional<WeakTrackingVH>, CallGraphNode *>;
+ using CallRecord = std::pair<std::optional<WeakTrackingVH>, CallGraphNode *>;
public:
using CalledFunctionsVector = std::vector<CallRecord>;
assert(!Call || !Call->getCalledFunction() ||
!Call->getCalledFunction()->isIntrinsic() ||
!Intrinsic::isLeaf(Call->getCalledFunction()->getIntrinsicID()));
- CalledFunctions.emplace_back(
- Call ? Optional<WeakTrackingVH>(Call) : Optional<WeakTrackingVH>(), M);
+ CalledFunctions.emplace_back(Call ? std::optional<WeakTrackingVH>(Call)
+ : std::optional<WeakTrackingVH>(),
+ M);
M->AddRef();
}
/// \param [in] V - the Value to find.
/// \returns The positive number corresponding to the value.
/// \returns std::nullopt if not present.
- Optional<unsigned> getGVN(Value *V) {
+ std::optional<unsigned> getGVN(Value *V) {
assert(V != nullptr && "Value is a nullptr?");
DenseMap<Value *, unsigned>::iterator VNIt = ValueToNumber.find(V);
if (VNIt == ValueToNumber.end())
/// \param [in] Num - the number to find.
/// \returns The Value associated with the number.
/// \returns std::nullopt if not present.
- Optional<Value *> fromGVN(unsigned Num) {
+ std::optional<Value *> fromGVN(unsigned Num) {
DenseMap<unsigned, Value *>::iterator VNIt = NumberToValue.find(Num);
if (VNIt == NumberToValue.end())
return std::nullopt;
/// \param N - The global value number to find the canonical number for.
/// \returns An optional containing the value, and std::nullopt if it could
/// not be found.
- Optional<unsigned> getCanonicalNum(unsigned N) {
+ std::optional<unsigned> getCanonicalNum(unsigned N) {
DenseMap<unsigned, unsigned>::iterator NCIt = NumberToCanonNum.find(N);
if (NCIt == NumberToCanonNum.end())
return std::nullopt;
/// \param N - The canonical number to find the global value number for.
/// \returns An optional containing the value, and std::nullopt if it could
/// not be found.
- Optional<unsigned> fromCanonicalNum(unsigned N) {
+ std::optional<unsigned> fromCanonicalNum(unsigned N) {
DenseMap<unsigned, unsigned>::iterator CNIt = CanonNumToNumber.find(N);
if (CNIt == CanonNumToNumber.end())
return std::nullopt;
// \returns The groups of similarity ranges found in the most recently passed
// set of modules.
- Optional<SimilarityGroupList> &getSimilarity() {
+ std::optional<SimilarityGroupList> &getSimilarity() {
return SimilarityCandidates;
}
/// The SimilarityGroups found with the most recent run of \ref
/// findSimilarity. std::nullopt if there is no recent run.
- Optional<SimilarityGroupList> SimilarityCandidates;
+ std::optional<SimilarityGroupList> SimilarityCandidates;
};
} // end namespace IRSimilarity
class DefaultInlineAdvice : public InlineAdvice {
public:
DefaultInlineAdvice(InlineAdvisor *Advisor, CallBase &CB,
- Optional<InlineCost> OIC, OptimizationRemarkEmitter &ORE,
- bool EmitRemarks = true)
+ std::optional<InlineCost> OIC,
+ OptimizationRemarkEmitter &ORE, bool EmitRemarks = true)
: InlineAdvice(Advisor, CB, ORE, OIC.has_value()), OriginalCB(&CB),
OIC(OIC), EmitRemarks(EmitRemarks) {}
private:
CallBase *const OriginalCB;
- Optional<InlineCost> OIC;
+ std::optional<InlineCost> OIC;
bool EmitRemarks;
};
protected:
InlineAdvisor(Module &M, FunctionAnalysisManager &FAM,
- Optional<InlineContext> IC = std::nullopt);
+ std::optional<InlineContext> IC = std::nullopt);
virtual std::unique_ptr<InlineAdvice> getAdviceImpl(CallBase &CB) = 0;
virtual std::unique_ptr<InlineAdvice> getMandatoryAdvice(CallBase &CB,
bool Advice);
Module &M;
FunctionAnalysisManager &FAM;
- const Optional<InlineContext> IC;
+ const std::optional<InlineContext> IC;
const std::string AnnotatedInlinePassName;
std::unique_ptr<ImportedFunctionsInliningStatistics> ImportedFunctionsStats;
/// CallSite. If we return the cost, we will emit an optimisation remark later
/// using that cost, so we won't do so from this function. Return std::nullopt
/// if inlining should not be attempted.
-Optional<InlineCost>
+std::optional<InlineCost>
shouldInline(CallBase &CB, function_ref<InlineCost(CallBase &CB)> GetInlineCost,
OptimizationRemarkEmitter &ORE, bool EnableDeferral = true);
const char *Reason = nullptr;
/// The cost-benefit pair computed by cost-benefit analysis.
- Optional<CostBenefitPair> CostBenefit = std::nullopt;
+ std::optional<CostBenefitPair> CostBenefit = std::nullopt;
// Trivial constructor, interesting logic in the factory functions below.
InlineCost(int Cost, int Threshold, int StaticBonusApplied,
const char *Reason = nullptr,
- Optional<CostBenefitPair> CostBenefit = std::nullopt)
+ std::optional<CostBenefitPair> CostBenefit = std::nullopt)
: Cost(Cost), Threshold(Threshold),
StaticBonusApplied(StaticBonusApplied), Reason(Reason),
CostBenefit(CostBenefit) {
}
static InlineCost
getAlways(const char *Reason,
- Optional<CostBenefitPair> CostBenefit = std::nullopt) {
+ std::optional<CostBenefitPair> CostBenefit = std::nullopt) {
return InlineCost(AlwaysInlineCost, 0, 0, Reason, CostBenefit);
}
static InlineCost
getNever(const char *Reason,
- Optional<CostBenefitPair> CostBenefit = std::nullopt) {
+ std::optional<CostBenefitPair> CostBenefit = std::nullopt) {
return InlineCost(NeverInlineCost, 0, 0, Reason, CostBenefit);
}
}
/// Get the cost-benefit pair which was computed by cost-benefit analysis
- Optional<CostBenefitPair> getCostBenefit() const { return CostBenefit; }
+ std::optional<CostBenefitPair> getCostBenefit() const { return CostBenefit; }
/// Get the reason of Always or Never.
const char *getReason() const {
int DefaultThreshold = -1;
/// Threshold to use for callees with inline hint.
- Optional<int> HintThreshold;
+ std::optional<int> HintThreshold;
/// Threshold to use for cold callees.
- Optional<int> ColdThreshold;
+ std::optional<int> ColdThreshold;
/// Threshold to use when the caller is optimized for size.
- Optional<int> OptSizeThreshold;
+ std::optional<int> OptSizeThreshold;
/// Threshold to use when the caller is optimized for minsize.
- Optional<int> OptMinSizeThreshold;
+ std::optional<int> OptMinSizeThreshold;
/// Threshold to use when the callsite is considered hot.
- Optional<int> HotCallSiteThreshold;
+ std::optional<int> HotCallSiteThreshold;
/// Threshold to use when the callsite is considered hot relative to function
/// entry.
- Optional<int> LocallyHotCallSiteThreshold;
+ std::optional<int> LocallyHotCallSiteThreshold;
/// Threshold to use when the callsite is considered cold.
- Optional<int> ColdCallSiteThreshold;
+ std::optional<int> ColdCallSiteThreshold;
/// Compute inline cost even when the cost has exceeded the threshold.
std::optional<bool> ComputeFullInlineCost;
std::optional<bool> AllowRecursiveCall = false;
};
-Optional<int> getStringFnAttrAsInt(CallBase &CB, StringRef AttrKind);
+std::optional<int> getStringFnAttrAsInt(CallBase &CB, StringRef AttrKind);
/// Generate the parameters to tune the inline cost analysis based only on the
/// commandline options.
/// directives or incompatibilities detectable without needing callee traversal.
/// Otherwise returns std::nullopt, meaning that inlining should be decided
/// based on other criteria (e.g. cost modeling).
-Optional<InlineResult> getAttributeBasedInliningDecision(
+std::optional<InlineResult> getAttributeBasedInliningDecision(
CallBase &Call, Function *Callee, TargetTransformInfo &CalleeTTI,
function_ref<const TargetLibraryInfo &(Function &)> GetTLI);
/// returns:
/// - std::nullopt, if the inlining cannot happen (is illegal)
/// - an integer, representing the cost.
-Optional<int> getInliningCostEstimate(
+std::optional<int> getInliningCostEstimate(
CallBase &Call, TargetTransformInfo &CalleeTTI,
function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
/// Get the expanded cost features. The features are returned unconditionally,
/// even if inlining is impossible.
-Optional<InlineCostFeatures> getInliningCostFeatures(
+std::optional<InlineCostFeatures> getInliningCostFeatures(
CallBase &Call, TargetTransformInfo &CalleeTTI,
function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
~InlineSizeEstimatorAnalysis();
static AnalysisKey Key;
- using Result = Optional<size_t>;
+ using Result = std::optional<size_t>;
Result run(const Function &F, FunctionAnalysisManager &FAM);
static bool isEvaluatorRequested();
/// to \p PtrToStride and therefore add further predicates to \p PSE.
/// The \p Assume parameter indicates if we are allowed to make additional
/// run-time assumptions.
-Optional<int64_t>
+std::optional<int64_t>
getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr,
const Loop *Lp,
const ValueToValueMap &StridesMap = ValueToValueMap(),
/// is a simple API that does not depend on the analysis pass.
/// \param StrictCheck Ensure that the calculated distance matches the
/// type-based one after all the bitcasts removal in the provided pointers.
-Optional<int> getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB,
- Value *PtrB, const DataLayout &DL,
- ScalarEvolution &SE, bool StrictCheck = false,
- bool CheckType = true);
+std::optional<int> getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB,
+ Value *PtrB, const DataLayout &DL,
+ ScalarEvolution &SE,
+ bool StrictCheck = false,
+ bool CheckType = true);
/// Attempt to sort the pointers in \p VL and return the sorted indices
/// in \p SortedIndices, if reordering is required.
/// are/aren't in the same cache line of size \p CLS. Two references are in
/// the same cache line iff the distance between them in the innermost
/// dimension is less than the cache line size. Return std::nullopt if unsure.
- Optional<bool> hasSpacialReuse(const IndexedReference &Other, unsigned CLS,
- AAResults &AA) const;
+ std::optional<bool> hasSpacialReuse(const IndexedReference &Other,
+ unsigned CLS, AAResults &AA) const;
/// Return true if the current object and the indexed reference \p Other
/// have distance smaller than \p MaxDistance in the dimension associated with
/// the given loop \p L. Return false if the distance is not smaller than \p
/// MaxDistance and std::nullopt if unsure.
- Optional<bool> hasTemporalReuse(const IndexedReference &Other,
- unsigned MaxDistance, const Loop &L,
- DependenceInfo &DI, AAResults &AA) const;
+ std::optional<bool> hasTemporalReuse(const IndexedReference &Other,
+ unsigned MaxDistance, const Loop &L,
+ DependenceInfo &DI, AAResults &AA) const;
/// Compute the cost of the reference w.r.t. the given loop \p L when it is
/// considered in the innermost position in the loop nest.
/// classified to have temporal reuse.
CacheCost(const LoopVectorTy &Loops, const LoopInfo &LI, ScalarEvolution &SE,
TargetTransformInfo &TTI, AAResults &AA, DependenceInfo &DI,
- Optional<unsigned> TRT = std::nullopt);
+ std::optional<unsigned> TRT = std::nullopt);
/// Create a CacheCost for the loop nest rooted by \p Root.
/// The optional parameter \p TRT can be used to specify the max. distance
/// classified to have temporal reuse.
static std::unique_ptr<CacheCost>
getCacheCost(Loop &Root, LoopStandardAnalysisResults &AR, DependenceInfo &DI,
- Optional<unsigned> TRT = std::nullopt);
+ std::optional<unsigned> TRT = std::nullopt);
/// Return the estimated cost of loop \p L if the given loop is part of the
/// loop nest associated with this object. Return -1 otherwise.
/// - the final value of the induction variable can be found
///
/// Else std::nullopt.
- static Optional<Loop::LoopBounds> getBounds(const Loop &L, PHINode &IndVar,
- ScalarEvolution &SE);
+ static std::optional<Loop::LoopBounds>
+ getBounds(const Loop &L, PHINode &IndVar, ScalarEvolution &SE);
/// Get the initial value of the loop induction variable.
Value &getInitialIVValue() const { return InitialIVValue; }
/// Return the struct LoopBounds collected if all struct members are found,
/// else std::nullopt.
- Optional<LoopBounds> getBounds(ScalarEvolution &SE) const;
+ std::optional<LoopBounds> getBounds(ScalarEvolution &SE) const;
/// Return the loop induction variable if found, else return nullptr.
/// An instruction is considered as the loop induction variable if
/// found, return nullptr.
MDNode *findOptionMDForLoop(const Loop *TheLoop, StringRef Name);
-Optional<bool> getOptionalBoolLoopAttribute(const Loop *TheLoop,
- StringRef Name);
-
+std::optional<bool> getOptionalBoolLoopAttribute(const Loop *TheLoop,
+ StringRef Name);
+
/// Returns true if Name is applied to TheLoop and enabled.
bool getBooleanLoopAttribute(const Loop *TheLoop, StringRef Name);
/// If it has a value (e.g. {"llvm.distribute", 1} return the value as an
/// operand or null otherwise. If the string metadata is not found return
/// std::nullopt.
-Optional<const MDOperand *> findStringMetadataForLoop(const Loop *TheLoop,
- StringRef Name);
+std::optional<const MDOperand *> findStringMetadataForLoop(const Loop *TheLoop,
+ StringRef Name);
/// Look for the loop attribute that requires progress within the loop.
/// Note: Most consumers probably want "isMustProgress" which checks
public:
po_iterator_storage(LoopBlocksTraversal &lbs) : LBT(lbs) {}
// These functions are defined below.
- bool insertEdge(Optional<BasicBlock *> From, BasicBlock *To);
+ bool insertEdge(std::optional<BasicBlock *> From, BasicBlock *To);
void finishPostorder(BasicBlock *BB);
};
};
inline bool po_iterator_storage<LoopBlocksTraversal, true>::insertEdge(
- Optional<BasicBlock *> From, BasicBlock *To) {
+ std::optional<BasicBlock *> From, BasicBlock *To) {
return LBT.visitPreorder(To);
}
return lastEvaluationResult()->getUntypedTensorValue(ExtraOutputIndex + 1);
}
- const Optional<TFModelEvaluator::EvaluationResult> &
+ const std::optional<TFModelEvaluator::EvaluationResult> &
lastEvaluationResult() const {
return LastEvaluationResult;
}
std::unique_ptr<TFModelEvaluator> Evaluator;
const std::vector<TensorSpec> OutputSpecs;
const std::vector<TensorSpec> ExtraOutputsForLogging;
- Optional<TFModelEvaluator::EvaluationResult> LastEvaluationResult;
+ std::optional<TFModelEvaluator::EvaluationResult> LastEvaluationResult;
void *evaluateUntyped() override;
};
///}
/// Map to cache isGuaranteedToTransferExecutionToSuccessor results.
- DenseMap<const BasicBlock *, Optional<bool>> BlockTransferMap;
+ DenseMap<const BasicBlock *, std::optional<bool>> BlockTransferMap;
/// Map to cache containsIrreducibleCFG results.
- DenseMap<const Function*, Optional<bool>> IrreducibleControlMap;
+ DenseMap<const Function *, std::optional<bool>> IrreducibleControlMap;
/// Map from instructions to associated must be executed iterators.
DenseMap<const Instruction *, std::unique_ptr<MustBeExecutedIterator>>
/// This function returns operand bundle clang_arc_attachedcall's argument,
/// which is the address of the ARC runtime function.
-inline Optional<Function *> getAttachedARCFunction(const CallBase *CB) {
+inline std::optional<Function *> getAttachedARCFunction(const CallBase *CB) {
auto B = CB->getOperandBundle(LLVMContext::OB_clang_arc_attachedcall);
if (!B)
return std::nullopt;
/// have the operand bundle or the operand is null. Otherwise it returns either
/// RetainRV or UnsafeClaimRV.
inline ARCInstKind getAttachedARCFunctionKind(const CallBase *CB) {
- Optional<Function *> Fn = getAttachedARCFunction(CB);
+ std::optional<Function *> Fn = getAttachedARCFunction(CB);
if (!Fn)
return ARCInstKind::None;
auto FnClass = GetFunctionClass(*Fn);
// percentile is above a large threshold.
std::optional<bool> HasLargeWorkingSetSize;
// Compute the threshold for a given cutoff.
- Optional<uint64_t> computeThreshold(int PercentileCutoff) const;
+ std::optional<uint64_t> computeThreshold(int PercentileCutoff) const;
// The map that caches the threshold values. The keys are the percentile
// cutoff values and the values are the corresponding threshold values.
mutable DenseMap<int, uint64_t> ThresholdCache;
// Not all EdgeRef have information about the source of the edge. Hence
// NodeRef corresponding to the source of the EdgeRef is explicitly passed.
- using GetProfCountTy = function_ref<Optional<Scaled64>(NodeRef, EdgeRef)>;
+ using GetProfCountTy =
+ function_ref<std::optional<Scaled64>(NodeRef, EdgeRef)>;
using AddCountTy = function_ref<void(NodeRef, Scaled64)>;
static void propagate(const CallGraphType &CG, GetProfCountTy GetProfCount,
/// "shape": <array of ints> }
/// For the "type" field, see the C++ primitive types used in
/// TFUTILS_SUPPORTED_TYPES.
-Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
- const json::Value &Value);
+std::optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
+ const json::Value &Value);
#define TFUTILS_GETDATATYPE_DEF(T, Name) \
template <> TensorType TensorSpec::getDataType<T>();
/// evaluation fails or the model is invalid, or an EvaluationResult
/// otherwise. The inputs are assumed to have been already provided via
/// getInput(). When returning std::nullopt, it also invalidates this object.
- Optional<EvaluationResult> evaluate();
+ std::optional<EvaluationResult> evaluate();
/// Provides access to the input vector.
template <typename T> T *getInput(size_t Index) {
return Range;
}
- Optional<APInt> asConstantInteger() const {
+ std::optional<APInt> asConstantInteger() const {
if (isConstant() && isa<ConstantInt>(getConstant())) {
return cast<ConstantInt>(getConstant())->getValue();
} else if (isConstantRange() && getConstantRange().isSingleElement()) {
/// T | T | F
/// F | T | T
/// (A)
-Optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
- const DataLayout &DL, bool LHSIsTrue = true,
- unsigned Depth = 0);
-Optional<bool> isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
- const Value *RHSOp0, const Value *RHSOp1,
- const DataLayout &DL, bool LHSIsTrue = true,
- unsigned Depth = 0);
+std::optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
+ const DataLayout &DL,
+ bool LHSIsTrue = true,
+ unsigned Depth = 0);
+std::optional<bool> isImpliedCondition(const Value *LHS,
+ CmpInst::Predicate RHSPred,
+ const Value *RHSOp0, const Value *RHSOp1,
+ const DataLayout &DL,
+ bool LHSIsTrue = true,
+ unsigned Depth = 0);
/// Return the boolean condition value in the context of the given instruction
/// if it is known based on dominating conditions.
-Optional<bool> isImpliedByDomCondition(const Value *Cond,
- const Instruction *ContextI,
- const DataLayout &DL);
-Optional<bool> isImpliedByDomCondition(CmpInst::Predicate Pred,
- const Value *LHS, const Value *RHS,
- const Instruction *ContextI,
- const DataLayout &DL);
+std::optional<bool> isImpliedByDomCondition(const Value *Cond,
+ const Instruction *ContextI,
+ const DataLayout &DL);
+std::optional<bool> isImpliedByDomCondition(CmpInst::Predicate Pred,
+ const Value *LHS, const Value *RHS,
+ const Instruction *ContextI,
+ const DataLayout &DL);
/// If Ptr1 is provably equal to Ptr2 plus a constant offset, return that
/// offset. For example, Ptr1 might be &A[42], and Ptr2 might be &A[40]. In
/// this case offset would be -8.
-Optional<int64_t> isPointerOffset(const Value *Ptr1, const Value *Ptr2,
- const DataLayout &DL);
+std::optional<int64_t> isPointerOffset(const Value *Ptr1, const Value *Ptr2,
+ const DataLayout &DL);
} // end namespace llvm
#endif // LLVM_ANALYSIS_VALUETRACKING_H
/// name. At the moment, this parameter is needed only to retrieve the
/// Vectorization Factor of scalable vector functions from their
/// respective IR declarations.
-Optional<VFInfo> tryDemangleForVFABI(StringRef MangledName, const Module &M);
+std::optional<VFInfo> tryDemangleForVFABI(StringRef MangledName,
+ const Module &M);
/// This routine mangles the given VectorName according to the LangRef
/// specification for vector-function-abi-variant attribute and is specific to
if (ListOfStrings.empty())
return;
for (const auto &MangledName : ListOfStrings) {
- const Optional<VFInfo> Shape =
+ const std::optional<VFInfo> Shape =
VFABI::tryDemangleForVFABI(MangledName, *(CI.getModule()));
// A match is found via scalar and vector names, and also by
// ensuring that the variant described in the attribute has a
}
ModRefInfo AAResults::getModRefInfo(const Instruction *I,
- const Optional<MemoryLocation> &OptLoc,
+ const std::optional<MemoryLocation> &OptLoc,
AAQueryInfo &AAQIP) {
if (OptLoc == std::nullopt) {
if (const auto *Call = dyn_cast<CallBase>(I))
return Attr & AliasAttrs(ExternalAttrMask);
}
-Optional<InstantiatedValue> instantiateInterfaceValue(InterfaceValue IValue,
- CallBase &Call) {
+std::optional<InstantiatedValue>
+instantiateInterfaceValue(InterfaceValue IValue, CallBase &Call) {
auto Index = IValue.Index;
auto *V = (Index == 0) ? &Call : Call.getArgOperand(Index - 1);
if (V->getType()->isPointerTy())
return std::nullopt;
}
-Optional<InstantiatedRelation>
+std::optional<InstantiatedRelation>
instantiateExternalRelation(ExternalRelation ERelation, CallBase &Call) {
auto From = instantiateInterfaceValue(ERelation.From, Call);
if (!From)
return InstantiatedRelation{*From, *To, ERelation.Offset};
}
-Optional<InstantiatedAttr> instantiateExternalAttribute(ExternalAttribute EAttr,
- CallBase &Call) {
+std::optional<InstantiatedAttr>
+instantiateExternalAttribute(ExternalAttribute EAttr, CallBase &Call) {
auto Value = instantiateInterfaceValue(EAttr.IValue, Call);
if (!Value)
return std::nullopt;
#define LLVM_ANALYSIS_ALIASANALYSISSUMMARY_H
#include "llvm/ADT/DenseMapInfo.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include <bitset>
+#include <optional>
namespace llvm {
Value *Val;
unsigned DerefLevel;
};
-Optional<InstantiatedValue> instantiateInterfaceValue(InterfaceValue IValue,
- CallBase &Call);
+std::optional<InstantiatedValue>
+instantiateInterfaceValue(InterfaceValue IValue, CallBase &Call);
inline bool operator==(InstantiatedValue LHS, InstantiatedValue RHS) {
return LHS.Val == RHS.Val && LHS.DerefLevel == RHS.DerefLevel;
InstantiatedValue From, To;
int64_t Offset;
};
-Optional<InstantiatedRelation>
+std::optional<InstantiatedRelation>
instantiateExternalRelation(ExternalRelation ERelation, CallBase &Call);
/// This is the result of instantiating ExternalAttribute at a particular
InstantiatedValue IValue;
AliasAttrs Attr;
};
-Optional<InstantiatedAttr> instantiateExternalAttribute(ExternalAttribute EAttr,
- CallBase &Call);
+std::optional<InstantiatedAttr>
+instantiateExternalAttribute(ExternalAttribute EAttr, CallBase &Call);
}
template <> struct DenseMapInfo<cflaa::InstantiatedValue> {
}
}
-Optional<uint32_t>
+std::optional<uint32_t>
BranchProbabilityInfo::getEstimatedBlockWeight(const BasicBlock *BB) const {
auto WeightIt = EstimatedBlockWeight.find(BB);
if (WeightIt == EstimatedBlockWeight.end())
return WeightIt->second;
}
-Optional<uint32_t>
+std::optional<uint32_t>
BranchProbabilityInfo::getEstimatedLoopWeight(const LoopData &L) const {
auto WeightIt = EstimatedLoopWeight.find(L);
if (WeightIt == EstimatedLoopWeight.end())
return WeightIt->second;
}
-Optional<uint32_t>
+std::optional<uint32_t>
BranchProbabilityInfo::getEstimatedEdgeWeight(const LoopEdge &Edge) const {
// For edges entering a loop take weight of a loop rather than an individual
// block in the loop.
}
template <class IterT>
-Optional<uint32_t> BranchProbabilityInfo::getMaxEstimatedEdgeWeight(
+std::optional<uint32_t> BranchProbabilityInfo::getMaxEstimatedEdgeWeight(
const LoopBlock &SrcLoopBB, iterator_range<IterT> Successors) const {
SmallVector<uint32_t, 4> Weights;
- Optional<uint32_t> MaxWeight;
+ std::optional<uint32_t> MaxWeight;
for (const BasicBlock *DstBB : Successors) {
const LoopBlock DstLoopBB = getLoopBlock(DstBB);
auto Weight = getEstimatedEdgeWeight({SrcLoopBB, DstLoopBB});
}
}
-Optional<uint32_t> BranchProbabilityInfo::getInitialEstimatedBlockWeight(
- const BasicBlock *BB) {
+std::optional<uint32_t>
+BranchProbabilityInfo::getInitialEstimatedBlockWeight(const BasicBlock *BB) {
// Returns true if \p BB has call marked with "NoReturn" attribute.
auto hasNoReturn = [&](const BasicBlock *BB) {
for (const auto &I : reverse(*BB))
uint64_t TotalWeight = 0;
// Go over all successors of BB and put their weights into SuccWeights.
for (const BasicBlock *SuccBB : successors(BB)) {
- Optional<uint32_t> Weight;
+ std::optional<uint32_t> Weight;
const LoopBlock SuccLoopBB = getLoopBlock(SuccBB);
const LoopEdge Edge{LoopBB, SuccLoopBB};
std::unique_ptr<MLInlineAdvice>
getAdviceFromModel(CallBase &CB, OptimizationRemarkEmitter &ORE) override;
- Optional<size_t> getNativeSizeEstimate(const Function &F) const;
+ std::optional<size_t> getNativeSizeEstimate(const Function &F) const;
private:
bool isLogging() const { return !!Logger; }
const bool IsDoingInference;
std::unique_ptr<TrainingLogger> Logger;
- const Optional<int32_t> InitialNativeSize;
- Optional<int32_t> CurrentNativeSize;
+ const std::optional<int32_t> InitialNativeSize;
+ std::optional<int32_t> CurrentNativeSize;
};
/// A variant of MLInlineAdvice that tracks all non-trivial inlining
LoggingMLInlineAdvice(DevelopmentModeMLInlineAdvisor *Advisor, CallBase &CB,
OptimizationRemarkEmitter &ORE, bool Recommendation,
TrainingLogger &Logger,
- Optional<size_t> CallerSizeEstimateBefore,
- Optional<size_t> CalleeSizeEstimateBefore,
+ std::optional<size_t> CallerSizeEstimateBefore,
+ std::optional<size_t> CalleeSizeEstimateBefore,
bool DefaultDecision, bool Mandatory = false)
: MLInlineAdvice(Advisor, CB, ORE, Recommendation), Logger(Logger),
CallerSizeEstimateBefore(CallerSizeEstimateBefore),
static const int64_t NoReward = 0;
TrainingLogger &Logger;
- const Optional<size_t> CallerSizeEstimateBefore;
- const Optional<size_t> CalleeSizeEstimateBefore;
+ const std::optional<size_t> CallerSizeEstimateBefore;
+ const std::optional<size_t> CalleeSizeEstimateBefore;
const int64_t DefaultDecision;
const int64_t Mandatory;
};
Logger->print();
}
-Optional<size_t>
+std::optional<size_t>
DevelopmentModeMLInlineAdvisor::getNativeSizeEstimate(const Function &F) const {
if (!InlineSizeEstimatorAnalysis::isEvaluatorRequested())
return std::nullopt;
PreservedAnalyses
IRSimilarityAnalysisPrinterPass::run(Module &M, ModuleAnalysisManager &AM) {
IRSimilarityIdentifier &IRSI = AM.getResult<IRSimilarityAnalysis>(M);
- Optional<SimilarityGroupList> &SimilarityCandidatesOpt = IRSI.getSimilarity();
+ std::optional<SimilarityGroupList> &SimilarityCandidatesOpt =
+ IRSI.getSimilarity();
for (std::vector<IRSimilarityCandidate> &CandVec : *SimilarityCandidatesOpt) {
OS << CandVec.size() << " candidates of length "
Advisor->getAnnotatedInlinePassName());
}
-llvm::Optional<llvm::InlineCost> static getDefaultInlineAdvice(
+std::optional<llvm::InlineCost> static getDefaultInlineAdvice(
CallBase &CB, FunctionAnalysisManager &FAM, const InlineParams &Params) {
Function &Caller = *CB.getCaller();
ProfileSummaryInfo *PSI =
/// CallSite. If we return the cost, we will emit an optimisation remark later
/// using that cost, so we won't do so from this function. Return std::nullopt
/// if inlining should not be attempted.
-Optional<InlineCost>
+std::optional<InlineCost>
llvm::shouldInline(CallBase &CB,
function_ref<InlineCost(CallBase &CB)> GetInlineCost,
OptimizationRemarkEmitter &ORE, bool EnableDeferral) {
}
InlineAdvisor::InlineAdvisor(Module &M, FunctionAnalysisManager &FAM,
- Optional<InlineContext> IC)
+ std::optional<InlineContext> IC)
: M(M), FAM(FAM), IC(IC),
AnnotatedInlinePassName((IC && AnnotateInlinePhase)
? llvm::AnnotateInlinePassName(*IC)
cl::desc("Disables evaluation of GetElementPtr with constant operands"));
namespace llvm {
-Optional<int> getStringFnAttrAsInt(const Attribute &Attr) {
+std::optional<int> getStringFnAttrAsInt(const Attribute &Attr) {
if (Attr.isValid()) {
int AttrValue = 0;
if (!Attr.getValueAsString().getAsInteger(10, AttrValue))
return std::nullopt;
}
-Optional<int> getStringFnAttrAsInt(CallBase &CB, StringRef AttrKind) {
+std::optional<int> getStringFnAttrAsInt(CallBase &CB, StringRef AttrKind) {
return getStringFnAttrAsInt(CB.getFnAttr(AttrKind));
}
-Optional<int> getStringFnAttrAsInt(Function *F, StringRef AttrKind) {
+std::optional<int> getStringFnAttrAsInt(Function *F, StringRef AttrKind) {
return getStringFnAttrAsInt(F->getFnAttribute(AttrKind));
}
bool DecidedByCostBenefit = false;
// The cost-benefit pair computed by cost-benefit analysis.
- Optional<CostBenefitPair> CostBenefit = std::nullopt;
+ std::optional<CostBenefitPair> CostBenefit = std::nullopt;
bool SingleBB = true;
/// analysis.
void updateThreshold(CallBase &Call, Function &Callee);
/// Return a higher threshold if \p Call is a hot callsite.
- Optional<int> getHotCallSiteThreshold(CallBase &Call,
- BlockFrequencyInfo *CallerBFI);
+ std::optional<int> getHotCallSiteThreshold(CallBase &Call,
+ BlockFrequencyInfo *CallerBFI);
/// Handle a capped 'int' increment for Cost.
void addCost(int64_t Inc) {
}
bool onCallBaseVisitStart(CallBase &Call) override {
- if (Optional<int> AttrCallThresholdBonus =
+ if (std::optional<int> AttrCallThresholdBonus =
getStringFnAttrAsInt(Call, "call-threshold-bonus"))
Threshold += *AttrCallThresholdBonus;
- if (Optional<int> AttrCallCost =
+ if (std::optional<int> AttrCallCost =
getStringFnAttrAsInt(Call, "call-inline-cost")) {
addCost(*AttrCallCost);
// Prevent further processing of the call since we want to override its
else if (NumVectorInstructions <= NumInstructions / 2)
Threshold -= VectorBonus / 2;
- if (Optional<int> AttrCost =
+ if (std::optional<int> AttrCost =
getStringFnAttrAsInt(CandidateCall, "function-inline-cost"))
Cost = *AttrCost;
- if (Optional<int> AttrCostMult = getStringFnAttrAsInt(
+ if (std::optional<int> AttrCostMult = getStringFnAttrAsInt(
CandidateCall,
InlineConstants::FunctionInlineCostMultiplierAttributeName))
Cost *= *AttrCostMult;
- if (Optional<int> AttrThreshold =
+ if (std::optional<int> AttrThreshold =
getStringFnAttrAsInt(CandidateCall, "function-inline-threshold"))
Threshold = *AttrThreshold;
// on the build.
void print(raw_ostream &OS);
- Optional<InstructionCostDetail> getCostDetails(const Instruction *I) {
+ std::optional<InstructionCostDetail> getCostDetails(const Instruction *I) {
if (InstructionCostDetailMap.find(I) != InstructionCostDetailMap.end())
return InstructionCostDetailMap[I];
return std::nullopt;
int getThreshold() const { return Threshold; }
int getCost() const { return Cost; }
int getStaticBonusApplied() const { return StaticBonusApplied; }
- Optional<CostBenefitPair> getCostBenefitPair() { return CostBenefit; }
+ std::optional<CostBenefitPair> getCostBenefitPair() { return CostBenefit; }
bool wasDecidedByCostBenefit() const { return DecidedByCostBenefit; }
bool wasDecidedByCostThreshold() const { return DecidedByCostThreshold; }
};
// The cost of inlining of the given instruction is printed always.
// The threshold delta is printed only when it is non-zero. It happens
// when we decided to give a bonus at a particular instruction.
- Optional<InstructionCostDetail> Record = ICCA->getCostDetails(I);
+ std::optional<InstructionCostDetail> Record = ICCA->getCostDetails(I);
if (!Record)
OS << "; No analysis for the instruction";
else {
return CallSiteFreq < CallerEntryFreq * ColdProb;
}
-Optional<int>
+std::optional<int>
InlineCostCallAnalyzer::getHotCallSiteThreshold(CallBase &Call,
BlockFrequencyInfo *CallerBFI) {
Function *Caller = Call.getCaller();
// return min(A, B) if B is valid.
- auto MinIfValid = [](int A, Optional<int> B) {
+ auto MinIfValid = [](int A, std::optional<int> B) {
return B ? std::min(A, B.value()) : A;
};
// return max(A, B) if B is valid.
- auto MaxIfValid = [](int A, Optional<int> B) {
+ auto MaxIfValid = [](int A, std::optional<int> B) {
return B ? std::max(A, B.value()) : A;
};
// The command line option overrides a limit set in the function attributes.
size_t FinalStackSizeThreshold = StackSizeThreshold;
if (!StackSizeThreshold.getNumOccurrences())
- if (Optional<int> AttrMaxStackSize = getStringFnAttrAsInt(
+ if (std::optional<int> AttrMaxStackSize = getStringFnAttrAsInt(
Caller, InlineConstants::MaxInlineStackSizeAttributeName))
FinalStackSizeThreshold = *AttrMaxStackSize;
if (AllocatedSize > FinalStackSizeThreshold)
GetAssumptionCache, GetTLI, GetBFI, PSI, ORE);
}
-Optional<int> llvm::getInliningCostEstimate(
+std::optional<int> llvm::getInliningCostEstimate(
CallBase &Call, TargetTransformInfo &CalleeTTI,
function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
return CA.getCost();
}
-Optional<InlineCostFeatures> llvm::getInliningCostFeatures(
+std::optional<InlineCostFeatures> llvm::getInliningCostFeatures(
CallBase &Call, TargetTransformInfo &CalleeTTI,
function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
return CFA.features();
}
-Optional<InlineResult> llvm::getAttributeBasedInliningDecision(
+std::optional<InlineResult> llvm::getAttributeBasedInliningDecision(
CallBase &Call, Function *Callee, TargetTransformInfo &CalleeTTI,
function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
private:
int Cost = INT_MAX;
int StaticBonusApplied = 0;
- Optional<CostBenefitPair> CostBenefit;
+ std::optional<CostBenefitPair> CostBenefit;
};
class MLPriority {
if (MaxRecurse != RecursionLimit)
return nullptr;
- Optional<bool> Imp =
+ std::optional<bool> Imp =
isImpliedByDomCondition(CmpInst::ICMP_EQ, Op0, Op1, Q.CxtI, Q.DL);
if (Imp && *Imp) {
Type *Ty = Op0->getType();
return Constant::getNullValue(Op0->getType());
if (Op0->getType()->isIntOrIntVectorTy(1)) {
- if (Optional<bool> Implied = isImpliedCondition(Op0, Op1, Q.DL)) {
+ if (std::optional<bool> Implied = isImpliedCondition(Op0, Op1, Q.DL)) {
// If Op0 is true implies Op1 is true, then Op0 is a subset of Op1.
if (*Implied == true)
return Op0;
if (*Implied == false)
return ConstantInt::getFalse(Op0->getType());
}
- if (Optional<bool> Implied = isImpliedCondition(Op1, Op0, Q.DL)) {
+ if (std::optional<bool> Implied = isImpliedCondition(Op1, Op0, Q.DL)) {
// If Op1 is true implies Op0 is true, then Op1 is a subset of Op0.
if (Implied.value())
return Op1;
return V;
if (Op0->getType()->isIntOrIntVectorTy(1)) {
- if (Optional<bool> Implied = isImpliedCondition(Op0, Op1, Q.DL, false)) {
+ if (std::optional<bool> Implied =
+ isImpliedCondition(Op0, Op1, Q.DL, false)) {
// If Op0 is false implies Op1 is false, then Op1 is a subset of Op0.
if (*Implied == false)
return Op0;
if (*Implied == true)
return ConstantInt::getTrue(Op0->getType());
}
- if (Optional<bool> Implied = isImpliedCondition(Op1, Op0, Q.DL, false)) {
+ if (std::optional<bool> Implied =
+ isImpliedCondition(Op1, Op0, Q.DL, false)) {
// If Op1 is false implies Op0 is false, then Op0 is a subset of Op1.
if (*Implied == false)
return Op1;
continue;
CallInst *Assume = cast<CallInst>(AssumeVH);
- if (Optional<bool> Imp = isImpliedCondition(Assume->getArgOperand(0),
- Predicate, LHS, RHS, Q.DL))
+ if (std::optional<bool> Imp = isImpliedCondition(
+ Assume->getArgOperand(0), Predicate, LHS, RHS, Q.DL))
if (isValidAssumeForContext(Assume, Q.CxtI, Q.DT))
return ConstantInt::get(getCompareTy(LHS), *Imp);
}
if (Value *V = foldSelectWithBinaryOp(Cond, TrueVal, FalseVal))
return V;
- Optional<bool> Imp = isImpliedByDomCondition(Cond, Q.CxtI, Q.DL);
+ std::optional<bool> Imp = isImpliedByDomCondition(Cond, Q.CxtI, Q.DL);
if (Imp)
return *Imp ? TrueVal : FalseVal;
if (isICmpTrue(Pred, Op1, Op0, Q.getWithoutUndef(), RecursionLimit))
return Op1;
- if (Optional<bool> Imp =
+ if (std::optional<bool> Imp =
isImpliedByDomCondition(Pred, Op0, Op1, Q.CxtI, Q.DL))
return *Imp ? Op0 : Op1;
- if (Optional<bool> Imp =
+ if (std::optional<bool> Imp =
isImpliedByDomCondition(Pred, Op1, Op0, Q.CxtI, Q.DL))
return *Imp ? Op1 : Op0;
Value *Op = Usr->getOperand(i);
ValueLatticeElement OpLatticeVal =
getValueFromCondition(Op, Condition, isTrueDest);
- if (Optional<APInt> OpConst = OpLatticeVal.asConstantInteger()) {
+ if (std::optional<APInt> OpConst =
+ OpLatticeVal.asConstantInteger()) {
Result = constantFoldUser(Usr, Op, *OpConst, DL);
break;
}
}
/// Check whether the access through \p Ptr has a constant stride.
-Optional<int64_t>
-llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy,
- Value *Ptr, const Loop *Lp,
- const ValueToValueMap &StridesMap, bool Assume,
- bool ShouldCheckWrap) {
+std::optional<int64_t> llvm::getPtrStride(PredicatedScalarEvolution &PSE,
+ Type *AccessTy, Value *Ptr,
+ const Loop *Lp,
+ const ValueToValueMap &StridesMap,
+ bool Assume, bool ShouldCheckWrap) {
Type *Ty = Ptr->getType();
assert(Ty->isPointerTy() && "Unexpected non-ptr");
return Stride;
}
-Optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB,
- Value *PtrB, const DataLayout &DL,
- ScalarEvolution &SE, bool StrictCheck,
- bool CheckType) {
+std::optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
+ Type *ElemTyB, Value *PtrB,
+ const DataLayout &DL,
+ ScalarEvolution &SE, bool StrictCheck,
+ bool CheckType) {
assert(PtrA && PtrB && "Expected non-nullptr pointers.");
assert(cast<PointerType>(PtrA->getType())
->isOpaqueOrPointeeTypeMatches(ElemTyA) && "Wrong PtrA type");
int Cnt = 1;
bool IsConsecutive = true;
for (auto *Ptr : VL.drop_front()) {
- Optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
- /*StrictCheck=*/true);
+ std::optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
+ /*StrictCheck=*/true);
if (!Diff)
return false;
return false;
Type *ElemTyA = getLoadStoreType(A);
Type *ElemTyB = getLoadStoreType(B);
- Optional<int> Diff = getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
- /*StrictCheck=*/true, CheckType);
+ std::optional<int> Diff =
+ getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
+ /*StrictCheck=*/true, CheckType);
return Diff && *Diff == 1;
}
<< "\n");
}
-Optional<bool> IndexedReference::hasSpacialReuse(const IndexedReference &Other,
- unsigned CLS,
- AAResults &AA) const {
+std::optional<bool>
+IndexedReference::hasSpacialReuse(const IndexedReference &Other, unsigned CLS,
+ AAResults &AA) const {
assert(IsValid && "Expecting a valid reference");
if (BasePointer != Other.getBasePointer() && !isAliased(Other, AA)) {
return InSameCacheLine;
}
-Optional<bool> IndexedReference::hasTemporalReuse(const IndexedReference &Other,
- unsigned MaxDistance,
- const Loop &L,
- DependenceInfo &DI,
- AAResults &AA) const {
+std::optional<bool>
+IndexedReference::hasTemporalReuse(const IndexedReference &Other,
+ unsigned MaxDistance, const Loop &L,
+ DependenceInfo &DI, AAResults &AA) const {
assert(IsValid && "Expecting a valid reference");
if (BasePointer != Other.getBasePointer() && !isAliased(Other, AA)) {
CacheCost::CacheCost(const LoopVectorTy &Loops, const LoopInfo &LI,
ScalarEvolution &SE, TargetTransformInfo &TTI,
- AAResults &AA, DependenceInfo &DI, Optional<unsigned> TRT)
+ AAResults &AA, DependenceInfo &DI,
+ std::optional<unsigned> TRT)
: Loops(Loops), TRT(TRT.value_or(TemporalReuseThreshold)), LI(LI), SE(SE),
TTI(TTI), AA(AA), DI(DI) {
assert(!Loops.empty() && "Expecting a non-empty loop vector.");
std::unique_ptr<CacheCost>
CacheCost::getCacheCost(Loop &Root, LoopStandardAnalysisResults &AR,
- DependenceInfo &DI, Optional<unsigned> TRT) {
+ DependenceInfo &DI, std::optional<unsigned> TRT) {
if (!Root.isOutermost()) {
LLVM_DEBUG(dbgs() << "Expecting the outermost loop in a loop nest\n");
return nullptr;
// when in actuality, depending on the array size, the first example
// should have a cost closer to 2x the second due to the two cache
// access per iteration from opposite ends of the array
- Optional<bool> HasTemporalReuse =
+ std::optional<bool> HasTemporalReuse =
R->hasTemporalReuse(Representative, *TRT, *InnerMostLoop, DI, AA);
- Optional<bool> HasSpacialReuse =
+ std::optional<bool> HasSpacialReuse =
R->hasSpacialReuse(Representative, CLS, AA);
if ((HasTemporalReuse && *HasTemporalReuse) ||
return nullptr;
}
-Optional<Loop::LoopBounds> Loop::LoopBounds::getBounds(const Loop &L,
- PHINode &IndVar,
- ScalarEvolution &SE) {
+std::optional<Loop::LoopBounds>
+Loop::LoopBounds::getBounds(const Loop &L, PHINode &IndVar,
+ ScalarEvolution &SE) {
InductionDescriptor IndDesc;
if (!InductionDescriptor::isInductionPHI(&IndVar, &L, &SE, IndDesc))
return std::nullopt;
return Direction::Unknown;
}
-Optional<Loop::LoopBounds> Loop::getBounds(ScalarEvolution &SE) const {
+std::optional<Loop::LoopBounds> Loop::getBounds(ScalarEvolution &SE) const {
if (PHINode *IndVar = getInductionVariable(SE))
return LoopBounds::getBounds(*this, *IndVar, SE);
/// If it has a value (e.g. {"llvm.distribute", 1} return the value as an
/// operand or null otherwise. If the string metadata is not found return
/// std::nullopt.
-Optional<const MDOperand *> llvm::findStringMetadataForLoop(const Loop *TheLoop,
- StringRef Name) {
+std::optional<const MDOperand *>
+llvm::findStringMetadataForLoop(const Loop *TheLoop, StringRef Name) {
MDNode *MD = findOptionMDForLoop(TheLoop, Name);
if (!MD)
return std::nullopt;
}
}
-Optional<bool> llvm::getOptionalBoolLoopAttribute(const Loop *TheLoop,
- StringRef Name) {
+std::optional<bool> llvm::getOptionalBoolLoopAttribute(const Loop *TheLoop,
+ StringRef Name) {
MDNode *MD = findOptionMDForLoop(TheLoop, Name);
if (!MD)
return std::nullopt;
static bool checkSafeInstruction(const Instruction &I,
const CmpInst *InnerLoopGuardCmp,
const CmpInst *OuterLoopLatchCmp,
- Optional<Loop::LoopBounds> OuterLoopLB) {
+ std::optional<Loop::LoopBounds> OuterLoopLB) {
bool IsAllowed =
isSafeToSpeculativelyExecute(&I) || isa<PHINode>(I) || isa<BranchInst>(I);
// Is the error status of posix_memalign correctly checked? If not it
// would be incorrect to assume it succeeds and load doesn't see the
// previous value.
- Optional<bool> Checked = isImpliedByDomCondition(
+ std::optional<bool> Checked = isImpliedByDomCondition(
ICmpInst::ICMP_EQ, CB, ConstantInt::get(CB->getType(), 0), &Load, DL);
if (!Checked || !*Checked)
return Unknown();
// First. Also note that First and Last are inclusive.
MemoryAccess *First;
MemoryAccess *Last;
- Optional<ListIndex> Previous;
+ std::optional<ListIndex> Previous;
DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
- Optional<ListIndex> Previous)
+ std::optional<ListIndex> Previous)
: Loc(Loc), First(First), Last(Last), Previous(Previous) {}
DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
- Optional<ListIndex> Previous)
+ std::optional<ListIndex> Previous)
: DefPath(Loc, Init, Init, Previous) {}
};
/// If this returns std::nullopt, NewPaused is a vector of searches that
/// terminated at StopWhere. Otherwise, NewPaused is left in an unspecified
/// state.
- Optional<TerminatedPath>
+ std::optional<TerminatedPath>
getBlockingAccess(const MemoryAccess *StopWhere,
SmallVectorImpl<ListIndex> &PausedSearches,
SmallVectorImpl<ListIndex> &NewPaused,
T &curNode() const { return W->Paths[*N]; }
Walker *W = nullptr;
- Optional<ListIndex> N = std::nullopt;
+ std::optional<ListIndex> N = std::nullopt;
};
using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
// FIXME: This is broken, because the Blocker may be reported to be
// liveOnEntry, and we'll happily wait for that to disappear (read: never)
// For the moment, this is fine, since we do nothing with blocker info.
- if (Optional<TerminatedPath> Blocker = getBlockingAccess(
+ if (std::optional<TerminatedPath> Blocker = getBlockingAccess(
Target, PausedSearches, NewPaused, TerminatedPaths)) {
// Find the node we started at. We can't search based on N->Last, since
std::optional<std::string> LoggingName;
};
-Optional<std::vector<LoggedFeatureSpec>>
+std::optional<std::vector<LoggedFeatureSpec>>
loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName,
StringRef ModelPath, StringRef SpecFileOverride) {
SmallVector<char, 128> OutputSpecsPath;
/// Lookup \p Key in \p Map and return the result, potentially after
/// initializing the optional through \p Fn(\p args).
template <typename K, typename V, typename FnTy, typename... ArgsTy>
-static V getOrCreateCachedOptional(K Key, DenseMap<K, Optional<V>> &Map,
- FnTy &&Fn, ArgsTy&&... args) {
- Optional<V> &OptVal = Map[Key];
+static V getOrCreateCachedOptional(K Key, DenseMap<K, std::optional<V>> &Map,
+ FnTy &&Fn, ArgsTy &&...args) {
+ std::optional<V> &OptVal = Map[Key];
if (!OptVal)
OptVal = Fn(std::forward<ArgsTy>(args)...);
return OptVal.value();
}
}
-Optional<uint64_t>
+std::optional<uint64_t>
ProfileSummaryInfo::computeThreshold(int PercentileCutoff) const {
if (!hasProfileSummary())
return std::nullopt;
} else {
LLVM_DEBUG(dbgs() << "Replay Inliner: Not Inlined " << Callee << " @ "
<< CallSiteLoc << "\n");
- // A negative inline is conveyed by "None" Optional<InlineCost>
+ // A negative inline is conveyed by a std::nullopt std::optional<InlineCost>
return std::make_unique<DefaultInlineAdvice>(this, CB, std::nullopt, ORE,
EmitRemarks);
}
EmitRemarks);
else if (ReplaySettings.ReplayFallback ==
ReplayInlinerSettings::Fallback::NeverInline)
- // A negative inline is conveyed by "None" Optional<InlineCost>
+ // A negative inline is conveyed by a std::nullopt std::optional<InlineCost>
return std::make_unique<DefaultInlineAdvice>(this, CB, std::nullopt, ORE,
EmitRemarks);
else {
/// This is a value used to signify "does not exist" where the
/// StratifiedIndex type is used.
///
- /// This is used instead of Optional<StratifiedIndex> because
- /// Optional<StratifiedIndex> would eat up a considerable amount of extra
+ /// This is used instead of std::optional<StratifiedIndex> because
+ /// std::optional<StratifiedIndex> would eat up a considerable amount of extra
/// memory, after struct padding/alignment is taken into account.
static const StratifiedIndex SetSentinel;
std::vector<StratifiedLink> Links)
: Values(std::move(Map)), Links(std::move(Links)) {}
- Optional<StratifiedInfo> find(const T &Elem) const {
+ std::optional<StratifiedInfo> find(const T &Elem) const {
auto Iter = Values.find(Elem);
if (Iter == Values.end())
return std::nullopt;
return true;
}
- Optional<const StratifiedInfo *> get(const T &Val) const {
+ std::optional<const StratifiedInfo *> get(const T &Val) const {
auto Result = Values.find(Val);
if (Result == Values.end())
return std::nullopt;
return &Result->second;
}
- Optional<StratifiedInfo *> get(const T &Val) {
+ std::optional<StratifiedInfo *> get(const T &Val) {
auto Result = Values.find(Val);
if (Result == Values.end())
return std::nullopt;
return &Result->second;
}
- Optional<StratifiedIndex> indexOf(const T &Val) {
+ std::optional<StratifiedIndex> indexOf(const T &Val) {
auto MaybeVal = get(Val);
if (!MaybeVal)
return std::nullopt;
return IsValid;
}
-Optional<TFModelEvaluator::EvaluationResult> TFModelEvaluator::evaluate() {
+std::optional<TFModelEvaluator::EvaluationResult> TFModelEvaluator::evaluate() {
if (!isValid())
return std::nullopt;
return EvaluationResult(Impl->evaluate());
std::multiplies<int64_t>())),
ElementSize(ElementSize) {}
-Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
- const json::Value &Value) {
- auto EmitError = [&](const llvm::Twine &Message) -> Optional<TensorSpec> {
+std::optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
+ const json::Value &Value) {
+ auto EmitError =
+ [&](const llvm::Twine &Message) -> std::optional<TensorSpec> {
std::string S;
llvm::raw_string_ostream OS(S);
OS << Value;
// Format of the ABI name:
// _ZGV<isa><mask><vlen><parameters>_<scalarname>[(<redirection>)]
-Optional<VFInfo> VFABI::tryDemangleForVFABI(StringRef MangledName,
- const Module &M) {
+std::optional<VFInfo> VFABI::tryDemangleForVFABI(StringRef MangledName,
+ const Module &M) {
const StringRef OriginalName = MangledName;
// Assume there is no custom name <redirection>, and therefore the
// vector name consists of
/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
/// ALHS ARHS" is true. Otherwise, return std::nullopt.
-static Optional<bool>
+static std::optional<bool>
isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
const Value *ARHS, const Value *BLHS, const Value *BRHS,
const DataLayout &DL, unsigned Depth) {
/// Return true if "icmp1 LPred X, Y" implies "icmp2 RPred X, Y" is true.
/// Return false if "icmp1 LPred X, Y" implies "icmp2 RPred X, Y" is false.
/// Otherwise, return std::nullopt if we can't infer anything.
-static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate LPred,
- CmpInst::Predicate RPred,
- bool AreSwappedOps) {
+static std::optional<bool>
+isImpliedCondMatchingOperands(CmpInst::Predicate LPred,
+ CmpInst::Predicate RPred, bool AreSwappedOps) {
// Canonicalize the predicate as if the operands were not commuted.
if (AreSwappedOps)
RPred = ICmpInst::getSwappedPredicate(RPred);
/// Return true if "icmp LPred X, LC" implies "icmp RPred X, RC" is true.
/// Return false if "icmp LPred X, LC" implies "icmp RPred X, RC" is false.
/// Otherwise, return std::nullopt if we can't infer anything.
-static Optional<bool> isImpliedCondCommonOperandWithConstants(
+static std::optional<bool> isImpliedCondCommonOperandWithConstants(
CmpInst::Predicate LPred, const APInt &LC, CmpInst::Predicate RPred,
const APInt &RC) {
ConstantRange DomCR = ConstantRange::makeExactICmpRegion(LPred, LC);
/// Return true if LHS implies RHS (expanded to its components as "R0 RPred R1")
/// is true. Return false if LHS implies RHS is false. Otherwise, return
/// std::nullopt if we can't infer anything.
-static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
- CmpInst::Predicate RPred,
- const Value *R0, const Value *R1,
- const DataLayout &DL, bool LHSIsTrue,
- unsigned Depth) {
+static std::optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
+ CmpInst::Predicate RPred,
+ const Value *R0, const Value *R1,
+ const DataLayout &DL,
+ bool LHSIsTrue, unsigned Depth) {
Value *L0 = LHS->getOperand(0);
Value *L1 = LHS->getOperand(1);
/// false. Otherwise, return std::nullopt if we can't infer anything. We
/// expect the RHS to be an icmp and the LHS to be an 'and', 'or', or a 'select'
/// instruction.
-static Optional<bool>
+static std::optional<bool>
isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred,
const Value *RHSOp0, const Value *RHSOp1,
const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
if ((!LHSIsTrue && match(LHS, m_LogicalOr(m_Value(ALHS), m_Value(ARHS)))) ||
(LHSIsTrue && match(LHS, m_LogicalAnd(m_Value(ALHS), m_Value(ARHS))))) {
// FIXME: Make this non-recursion.
- if (Optional<bool> Implication = isImpliedCondition(
+ if (std::optional<bool> Implication = isImpliedCondition(
ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
return Implication;
- if (Optional<bool> Implication = isImpliedCondition(
+ if (std::optional<bool> Implication = isImpliedCondition(
ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
return Implication;
return std::nullopt;
return std::nullopt;
}
-Optional<bool>
+std::optional<bool>
llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
const Value *RHSOp0, const Value *RHSOp1,
const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
return std::nullopt;
}
-Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
- const DataLayout &DL, bool LHSIsTrue,
- unsigned Depth) {
+std::optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
+ const DataLayout &DL,
+ bool LHSIsTrue, unsigned Depth) {
// LHS ==> RHS by definition
if (LHS == RHS)
return LHSIsTrue;
// LHS ==> !(RHS1 && RHS2) if LHS ==> !RHS1 or LHS ==> !RHS2
const Value *RHS1, *RHS2;
if (match(RHS, m_LogicalOr(m_Value(RHS1), m_Value(RHS2)))) {
- if (Optional<bool> Imp =
+ if (std::optional<bool> Imp =
isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1))
if (*Imp == true)
return true;
- if (Optional<bool> Imp =
+ if (std::optional<bool> Imp =
isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1))
if (*Imp == true)
return true;
}
if (match(RHS, m_LogicalAnd(m_Value(RHS1), m_Value(RHS2)))) {
- if (Optional<bool> Imp =
+ if (std::optional<bool> Imp =
isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1))
if (*Imp == false)
return false;
- if (Optional<bool> Imp =
+ if (std::optional<bool> Imp =
isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1))
if (*Imp == false)
return false;
return {PredCond, TrueBB == ContextBB};
}
-Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
- const Instruction *ContextI,
- const DataLayout &DL) {
+std::optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
+ const Instruction *ContextI,
+ const DataLayout &DL) {
assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
auto PredCond = getDomPredecessorCondition(ContextI);
if (PredCond.first)
return std::nullopt;
}
-Optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
- const Value *LHS, const Value *RHS,
- const Instruction *ContextI,
- const DataLayout &DL) {
+std::optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
+ const Value *LHS,
+ const Value *RHS,
+ const Instruction *ContextI,
+ const DataLayout &DL) {
auto PredCond = getDomPredecessorCondition(ContextI);
if (PredCond.first)
return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL,
return Offset;
}
-Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2,
- const DataLayout &DL) {
+std::optional<int64_t> llvm::isPointerOffset(const Value *Ptr1,
+ const Value *Ptr2,
+ const DataLayout &DL) {
APInt Offset1(DL.getIndexTypeSizeInBits(Ptr1->getType()), 0);
APInt Offset2(DL.getIndexTypeSizeInBits(Ptr2->getType()), 0);
Ptr1 = Ptr1->stripAndAccumulateConstantOffsets(DL, Offset1, true);
for (const auto &S : SetVector<StringRef>(ListAttr.begin(), ListAttr.end())) {
#ifndef NDEBUG
LLVM_DEBUG(dbgs() << "VFABI: adding mapping '" << S << "'\n");
- Optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, *(CI.getModule()));
+ std::optional<VFInfo> Info =
+ VFABI::tryDemangleForVFABI(S, *(CI.getModule()));
assert(Info && "Invalid name for a VFABI variant.");
assert(CI.getModule()->getFunction(Info.value().VectorName) &&
"Vector function is missing.");
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
void finishPostorder(const MachineBasicBlock*) {}
- bool insertEdge(Optional<const MachineBasicBlock *> From,
+ bool insertEdge(std::optional<const MachineBasicBlock *> From,
const MachineBasicBlock *To) {
// Skip already visited To blocks.
MachineTraceMetrics::TraceBlockInfo &TBI = LB.Blocks[To->getNumber()];
break;
// Check to see if this store is to a constant offset from the start ptr.
- Optional<int64_t> Offset =
+ std::optional<int64_t> Offset =
isPointerOffset(StartPtr, NextStore->getPointerOperand(), *DL);
if (!Offset)
break;
break;
// Check to see if this store is to a constant offset from the start ptr.
- Optional<int64_t> Offset = isPointerOffset(StartPtr, MSI->getDest(), *DL);
+ std::optional<int64_t> Offset =
+ isPointerOffset(StartPtr, MSI->getDest(), *DL);
if (!Offset)
break;
Value *OutlinableRegion::findCorrespondingValueIn(const OutlinableRegion &Other,
Value *V) {
- Optional<unsigned> GVN = Candidate->getGVN(V);
+ std::optional<unsigned> GVN = Candidate->getGVN(V);
assert(GVN && "No GVN for incoming value");
- Optional<unsigned> CanonNum = Candidate->getCanonicalNum(*GVN);
- Optional<unsigned> FirstGVN = Other.Candidate->fromCanonicalNum(*CanonNum);
- Optional<Value *> FoundValueOpt = Other.Candidate->fromGVN(*FirstGVN);
+ std::optional<unsigned> CanonNum = Candidate->getCanonicalNum(*GVN);
+ std::optional<unsigned> FirstGVN =
+ Other.Candidate->fromCanonicalNum(*CanonNum);
+ std::optional<Value *> FoundValueOpt = Other.Candidate->fromGVN(*FirstGVN);
return FoundValueOpt.value_or(nullptr);
}
// assigned by the IRSimilarityCandidate, has been seen before, we check if
// the number has been found to be not the same value in each instance.
for (Value *V : ID.OperVals) {
- Optional<unsigned> GVNOpt = C.getGVN(V);
+ std::optional<unsigned> GVNOpt = C.getGVN(V);
assert(GVNOpt && "Expected a GVN for operand?");
unsigned GVN = GVNOpt.value();
// we find argument locations for the canonical value numbering. This
// numbering overrides any discovered location for the extracted code.
for (unsigned InputVal : InputGVNs) {
- Optional<unsigned> CanonicalNumberOpt = C.getCanonicalNum(InputVal);
+ std::optional<unsigned> CanonicalNumberOpt = C.getCanonicalNum(InputVal);
assert(CanonicalNumberOpt && "Canonical number not found?");
unsigned CanonicalNumber = CanonicalNumberOpt.value();
- Optional<Value *> InputOpt = C.fromGVN(InputVal);
+ std::optional<Value *> InputOpt = C.fromGVN(InputVal);
assert(InputOpt && "Global value number not found?");
Value *Input = InputOpt.value();
/// \param AggArgIdx - The argument \p PN will be stored into.
/// \returns An optional holding the assigned canonical number, or std::nullopt
/// if there is some attribute of the PHINode blocking it from being used.
-static Optional<unsigned> getGVNForPHINode(OutlinableRegion &Region,
- PHINode *PN,
- DenseSet<BasicBlock *> &Blocks,
- unsigned AggArgIdx) {
+static std::optional<unsigned> getGVNForPHINode(OutlinableRegion &Region,
+ PHINode *PN,
+ DenseSet<BasicBlock *> &Blocks,
+ unsigned AggArgIdx) {
OutlinableGroup &Group = *Region.Parent;
IRSimilarityCandidate &Cand = *Region.Candidate;
BasicBlock *PHIBB = PN->getParent();
// are trying to analyze, meaning, that if it was outlined, we would be
// adding an extra input. We ignore this case for now, and so ignore the
// region.
- Optional<unsigned> OGVN = Cand.getGVN(Incoming);
+ std::optional<unsigned> OGVN = Cand.getGVN(Incoming);
if (!OGVN && Blocks.contains(IncomingBlock)) {
Region.IgnoreRegion = true;
return std::nullopt;
// PHINode to generate a hash value representing this instance of the PHINode.
DenseMap<hash_code, unsigned>::iterator GVNToPHIIt;
DenseMap<unsigned, PHINodeData>::iterator PHIToGVNIt;
- Optional<unsigned> BBGVN = Cand.getGVN(PHIBB);
+ std::optional<unsigned> BBGVN = Cand.getGVN(PHIBB);
assert(BBGVN && "Could not find GVN for the incoming block!");
BBGVN = Cand.getCanonicalNum(BBGVN.value());
// TODO: Adapt to the extra input from the PHINode.
PHINode *PN = dyn_cast<PHINode>(Output);
- Optional<unsigned> GVN;
+ std::optional<unsigned> GVN;
if (PN && !BlocksInRegion.contains(PN->getParent())) {
// Values outside the region can be combined into PHINode when we
// have multiple exits. We collect both of these into a list to identify
IVal = findOutputMapping(OutputMappings, IVal);
// Find and add the canonical number for the incoming value.
- Optional<unsigned> GVN = Region.Candidate->getGVN(IVal);
+ std::optional<unsigned> GVN = Region.Candidate->getGVN(IVal);
assert(GVN && "No GVN for incoming value");
- Optional<unsigned> CanonNum = Region.Candidate->getCanonicalNum(*GVN);
+ std::optional<unsigned> CanonNum = Region.Candidate->getCanonicalNum(*GVN);
assert(CanonNum && "No Canonical Number for GVN");
CanonNums.push_back(std::make_pair(*CanonNum, IBlock));
}
assert(It->second.second.size() > 0 && "PHINode does not have any values!");
OutputCanon = *It->second.second.begin();
}
- Optional<unsigned> OGVN = Region.Candidate->fromCanonicalNum(OutputCanon);
+ std::optional<unsigned> OGVN =
+ Region.Candidate->fromCanonicalNum(OutputCanon);
assert(OGVN && "Could not find GVN for Canonical Number?");
- Optional<Value *> OV = Region.Candidate->fromGVN(*OGVN);
+ std::optional<Value *> OV = Region.Candidate->fromGVN(*OGVN);
assert(OV && "Could not find value for GVN?");
return *OV;
}
// parameter.
auto GetCallSiteProfCount = [&](const CallGraphNode *,
const CallGraphNode::CallRecord &Edge) {
- Optional<Scaled64> Res;
+ std::optional<Scaled64> Res;
if (!Edge.first)
return Res;
CallBase &CB = *cast<CallBase>(*Edge.first);
Scaled64 BBCount(BFI.getBlockFreq(CSBB).getFrequency(), 0);
BBCount /= EntryFreq;
BBCount *= Counts[Caller];
- return Optional<Scaled64>(BBCount);
+ return std::optional<Scaled64>(BBCount);
};
CallGraph CG(M);
return nullptr;
}
-static Optional<bool> getKnownSign(Value *Op, Instruction *CxtI,
- const DataLayout &DL, AssumptionCache *AC,
- DominatorTree *DT) {
+static std::optional<bool> getKnownSign(Value *Op, Instruction *CxtI,
+ const DataLayout &DL,
+ AssumptionCache *AC,
+ DominatorTree *DT) {
KnownBits Known = computeKnownBits(Op, DL, 0, AC, CxtI, DT);
if (Known.isNonNegative())
return false;
if (match(IIOperand, m_Select(m_Value(), m_Neg(m_Value(X)), m_Deferred(X))))
return replaceOperand(*II, 0, X);
- if (Optional<bool> Sign = getKnownSign(IIOperand, II, DL, &AC, &DT)) {
+ if (std::optional<bool> Sign = getKnownSign(IIOperand, II, DL, &AC, &DT)) {
// abs(x) -> x if x >= 0
if (!*Sign)
return replaceInstUsesWith(*II, IIOperand);
return nullptr;
// Try to simplify this compare to T/F based on the dominating condition.
- Optional<bool> Imp = isImpliedCondition(DomCond, &Cmp, DL, TrueBB == CmpBB);
+ std::optional<bool> Imp =
+ isImpliedCondition(DomCond, &Cmp, DL, TrueBB == CmpBB);
if (Imp)
return replaceInstUsesWith(Cmp, ConstantInt::get(Cmp.getType(), *Imp));
auto SimplifyOp = [&](Value *Op, bool SelectCondIsTrue) -> Value * {
if (Value *Res = simplifyICmpInst(Pred, Op, RHS, SQ))
return Res;
- if (Optional<bool> Impl = isImpliedCondition(SI->getCondition(), Pred, Op,
- RHS, DL, SelectCondIsTrue))
+ if (std::optional<bool> Impl = isImpliedCondition(
+ SI->getCondition(), Pred, Op, RHS, DL, SelectCondIsTrue))
return ConstantInt::get(I.getType(), *Impl);
return nullptr;
};
assert(Op->getType()->isIntOrIntVectorTy(1) &&
"Op must be either i1 or vector of i1.");
- Optional<bool> Res = isImpliedCondition(Op, CondVal, DL, IsAnd);
+ std::optional<bool> Res = isImpliedCondition(Op, CondVal, DL, IsAnd);
if (!Res)
return nullptr;
// if c implies that b is false.
if (match(CondVal, m_LogicalOr(m_Value(A), m_Value(B))) &&
match(FalseVal, m_Zero())) {
- Optional<bool> Res = isImpliedCondition(TrueVal, B, DL);
+ std::optional<bool> Res = isImpliedCondition(TrueVal, B, DL);
if (Res && *Res == false)
return replaceOperand(SI, 0, A);
}
if (match(TrueVal, m_LogicalOr(m_Value(A), m_Value(B))) &&
match(FalseVal, m_Zero())) {
- Optional<bool> Res = isImpliedCondition(CondVal, B, DL);
+ std::optional<bool> Res = isImpliedCondition(CondVal, B, DL);
if (Res && *Res == false)
return replaceOperand(SI, 1, A);
}
// if c = false implies that b = true
if (match(TrueVal, m_One()) &&
match(FalseVal, m_LogicalAnd(m_Value(A), m_Value(B)))) {
- Optional<bool> Res = isImpliedCondition(CondVal, B, DL, false);
+ std::optional<bool> Res = isImpliedCondition(CondVal, B, DL, false);
if (Res && *Res == true)
return replaceOperand(SI, 2, A);
}
if (match(CondVal, m_LogicalAnd(m_Value(A), m_Value(B))) &&
match(TrueVal, m_One())) {
- Optional<bool> Res = isImpliedCondition(FalseVal, B, DL, false);
+ std::optional<bool> Res = isImpliedCondition(FalseVal, B, DL, false);
if (Res && *Res == true)
return replaceOperand(SI, 0, A);
}
return false;
bool CondIsTrue = PBI->getSuccessor(0) == CurrentBB;
- Optional<bool> Implication =
+ std::optional<bool> Implication =
isImpliedCondition(PBI->getCondition(), Cond, DL, CondIsTrue);
// If the branch condition of BB (which is Cond) and CurrentPred are
/// Check whether the loop metadata is forcing distribution to be
/// enabled/disabled.
void setForced() {
- Optional<const MDOperand *> Value =
+ std::optional<const MDOperand *> Value =
findStringMetadataForLoop(L, "llvm.loop.distribute.enable");
if (!Value)
return;
break;
// Check to see if this store is to a constant offset from the start ptr.
- Optional<int64_t> Offset =
+ std::optional<int64_t> Offset =
isPointerOffset(StartPtr, NextStore->getPointerOperand(), DL);
if (!Offset)
break;
break;
// Check to see if this store is to a constant offset from the start ptr.
- Optional<int64_t> Offset = isPointerOffset(StartPtr, MSI->getDest(), DL);
+ std::optional<int64_t> Offset =
+ isPointerOffset(StartPtr, MSI->getDest(), DL);
if (!Offset)
break;
}
TransformationMode llvm::hasVectorizeTransformation(const Loop *L) {
- Optional<bool> Enable =
+ std::optional<bool> Enable =
getOptionalBoolLoopAttribute(L, "llvm.loop.vectorize.enable");
if (Enable == false)
#ifndef NDEBUG
for (const std::string &VariantMapping : VariantMappings) {
LLVM_DEBUG(dbgs() << "VFABI: adding mapping '" << VariantMapping << "'\n");
- Optional<VFInfo> VI = VFABI::tryDemangleForVFABI(VariantMapping, *M);
+ std::optional<VFInfo> VI = VFABI::tryDemangleForVFABI(VariantMapping, *M);
assert(VI && "Cannot add an invalid VFABI name.");
assert(M->getNamedValue(VI.value().VectorName) &&
"Cannot add variant to attribute: "
// If this basic block has dominating predecessor blocks and the dominating
// blocks' conditions imply BI's condition, we know the direction of BI.
- Optional<bool> Imp = isImpliedByDomCondition(BI->getCondition(), BI, DL);
+ std::optional<bool> Imp = isImpliedByDomCondition(BI->getCondition(), BI, DL);
if (Imp) {
// Turn this into a branch on constant.
auto *OldCond = BI->getCondition();
!LI2->isSimple())
return LookAheadHeuristics::ScoreFail;
- Optional<int> Dist = getPointersDiff(
+ std::optional<int> Dist = getPointersDiff(
LI1->getType(), LI1->getPointerOperand(), LI2->getType(),
LI2->getPointerOperand(), DL, SE, /*StrictCheck=*/true);
if (!Dist || *Dist == 0) {
Ptr0 = PointerOps[Order.front()];
PtrN = PointerOps[Order.back()];
}
- Optional<int> Diff =
+ std::optional<int> Diff =
getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, DL, SE);
// Check that the sorted loads are consecutive.
if (static_cast<unsigned>(*Diff) == VL.size() - 1)
unsigned Cnt = 1;
for (Value *Ptr : VL.drop_front()) {
bool Found = any_of(Bases, [&](auto &Base) {
- Optional<int> Diff =
+ std::optional<int> Diff =
getPointersDiff(ElemTy, Base.first, ElemTy, Ptr, DL, SE,
/*StrictCheck=*/true);
if (!Diff)
Value *S0Ptr = S0->getPointerOperand();
for (unsigned Idx : seq<unsigned>(1, StoresVec.size())) {
StoreInst *SI = StoresVec[Idx];
- Optional<int> Diff =
+ std::optional<int> Diff =
getPointersDiff(S0Ty, S0Ptr, SI->getValueOperand()->getType(),
SI->getPointerOperand(), *DL, *SE,
/*StrictCheck=*/true);
Ptr0 = PointerOps[CurrentOrder.front()];
PtrN = PointerOps[CurrentOrder.back()];
}
- Optional<int> Dist =
+ std::optional<int> Dist =
getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE);
// Check that the sorted pointer operands are consecutive.
if (static_cast<unsigned>(*Dist) == VL.size() - 1) {
++IterCnt;
CheckedPairs[Idx].set(K);
CheckedPairs[K].set(Idx);
- Optional<int> Diff = getPointersDiff(
+ std::optional<int> Diff = getPointersDiff(
Stores[K]->getValueOperand()->getType(), Stores[K]->getPointerOperand(),
Stores[Idx]->getValueOperand()->getType(),
Stores[Idx]->getPointerOperand(), *DL, *SE, /*StrictCheck=*/true);
// Tests that template specializations are kept up to date
void *Null = nullptr;
po_iterator_storage<std::set<void *>, false> PIS;
- PIS.insertEdge(Optional<void *>(), Null);
+ PIS.insertEdge(std::optional<void *>(), Null);
ExtSetTy Ext;
po_iterator_storage<ExtSetTy, true> PISExt(Ext);
- PIS.insertEdge(Optional<void *>(), Null);
+ PIS.insertEdge(std::optional<void *>(), Null);
// Test above, but going through po_iterator (which inherits from template
// base)
BasicBlock *NullBB = nullptr;
auto PI = po_end(NullBB);
- PI.insertEdge(Optional<BasicBlock *>(), NullBB);
+ PI.insertEdge(std::optional<BasicBlock *>(), NullBB);
auto PIExt = po_ext_end(NullBB, Ext);
- PIExt.insertEdge(Optional<BasicBlock *>(), NullBB);
+ PIExt.insertEdge(std::optional<BasicBlock *>(), NullBB);
}
// Test post-order and reverse post-order traversals for simple graph type.
}
};
- llvm::Optional<TestAnalyses> Analyses;
+ std::optional<TestAnalyses> Analyses;
TestAnalyses &setupAnalyses() {
assert(F);
Loop *L = LI.getLoopFor(Header);
EXPECT_NE(L, nullptr);
- Optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
+ std::optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
EXPECT_NE(Bounds, std::nullopt);
ConstantInt *InitialIVValue =
dyn_cast<ConstantInt>(&Bounds->getInitialIVValue());
Loop *L = LI.getLoopFor(Header);
EXPECT_NE(L, nullptr);
- Optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
+ std::optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
EXPECT_NE(Bounds, std::nullopt);
ConstantInt *InitialIVValue =
dyn_cast<ConstantInt>(&Bounds->getInitialIVValue());
Loop *L = LI.getLoopFor(Header);
EXPECT_NE(L, nullptr);
- Optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
+ std::optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
EXPECT_NE(Bounds, std::nullopt);
ConstantInt *InitialIVValue =
dyn_cast<ConstantInt>(&Bounds->getInitialIVValue());
Loop *L = LI.getLoopFor(Header);
EXPECT_NE(L, nullptr);
- Optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
+ std::optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
EXPECT_NE(Bounds, std::nullopt);
ConstantInt *InitialIVValue =
dyn_cast<ConstantInt>(&Bounds->getInitialIVValue());
Loop *L = LI.getLoopFor(Header);
EXPECT_NE(L, nullptr);
- Optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
+ std::optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
EXPECT_NE(Bounds, std::nullopt);
ConstantInt *InitialIVValue =
dyn_cast<ConstantInt>(&Bounds->getInitialIVValue());
Loop *L = LI.getLoopFor(Header);
EXPECT_NE(L, nullptr);
- Optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
+ std::optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
EXPECT_NE(Bounds, std::nullopt);
ConstantInt *InitialIVValue =
dyn_cast<ConstantInt>(&Bounds->getInitialIVValue());
Loop *L = LI.getLoopFor(Header);
EXPECT_NE(L, nullptr);
- Optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
+ std::optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
EXPECT_NE(Bounds, std::nullopt);
ConstantInt *InitialIVValue =
dyn_cast<ConstantInt>(&Bounds->getInitialIVValue());
Loop *L = LI.getLoopFor(Header);
EXPECT_NE(L, nullptr);
- Optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
+ std::optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
EXPECT_NE(Bounds, std::nullopt);
ConstantInt *InitialIVValue =
dyn_cast<ConstantInt>(&Bounds->getInitialIVValue());
Loop *L = LI.getLoopFor(Header);
EXPECT_NE(L, nullptr);
- Optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
+ std::optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
EXPECT_NE(Bounds, std::nullopt);
EXPECT_EQ(Bounds->getInitialIVValue().getName(), "ub");
EXPECT_EQ(Bounds->getStepInst().getName(), "inc");
Loop *L = LI.getLoopFor(Header);
EXPECT_NE(L, nullptr);
- Optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
+ std::optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
EXPECT_NE(Bounds, std::nullopt);
ConstantInt *InitialIVValue =
dyn_cast<ConstantInt>(&Bounds->getInitialIVValue());
Loop *L = LI.getLoopFor(Header);
EXPECT_NE(L, nullptr);
- Optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
+ std::optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
EXPECT_NE(Bounds, std::nullopt);
ConstantInt *InitialIVValue =
dyn_cast<ConstantInt>(&Bounds->getInitialIVValue());
Loop *L = LI.getLoopFor(Header);
EXPECT_NE(L, nullptr);
- Optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
+ std::optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
EXPECT_NE(Bounds, std::nullopt);
ConstantInt *InitialIVValue =
dyn_cast<ConstantInt>(&Bounds->getInitialIVValue());
Loop *L = LI.getLoopFor(Header);
EXPECT_NE(L, nullptr);
- Optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
+ std::optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
EXPECT_NE(Bounds, std::nullopt);
ConstantInt *InitialIVValue =
dyn_cast<ConstantInt>(&Bounds->getInitialIVValue());
Loop *L = LI.getLoopFor(Header);
EXPECT_NE(L, nullptr);
- Optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
+ std::optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
EXPECT_NE(Bounds, std::nullopt);
ConstantInt *InitialIVValue =
dyn_cast<ConstantInt>(&Bounds->getInitialIVValue());
Loop *L = LI.getLoopFor(Header);
EXPECT_NE(L, nullptr);
- Optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
+ std::optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
EXPECT_NE(Bounds, std::nullopt);
ConstantInt *InitialIVValue =
dyn_cast<ConstantInt>(&Bounds->getInitialIVValue());
Loop *L = LI.getLoopFor(Header);
EXPECT_NE(L, nullptr);
- Optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
+ std::optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
EXPECT_NE(Bounds, std::nullopt);
ConstantInt *InitialIVValue =
dyn_cast<ConstantInt>(&Bounds->getInitialIVValue());
L = LI.getLoopFor(Header);
EXPECT_NE(L, nullptr);
- Optional<Loop::LoopBounds> InnerBounds = L->getBounds(SE);
+ std::optional<Loop::LoopBounds> InnerBounds = L->getBounds(SE);
EXPECT_NE(InnerBounds, std::nullopt);
InitialIVValue =
dyn_cast<ConstantInt>(&InnerBounds->getInitialIVValue());
Loop *L = LI.getLoopFor(Header);
EXPECT_NE(L, nullptr);
- Optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
+ std::optional<Loop::LoopBounds> Bounds = L->getBounds(SE);
EXPECT_NE(Bounds, std::nullopt);
ConstantInt *InitialIVValue =
dyn_cast<ConstantInt>(&Bounds->getInitialIVValue());
auto *ScevXB = SE.getSCEV(getInstructionByName(F, "xb")); // {%pp,+,1}
auto *ScevIVNext = SE.getSCEV(getInstructionByName(F, "iv.next")); // {1,+,1}
- auto diff = [&SE](const SCEV *LHS, const SCEV *RHS) -> Optional<int> {
+ auto diff = [&SE](const SCEV *LHS, const SCEV *RHS) -> std::optional<int> {
auto ConstantDiffOrNone = computeConstantDifference(SE, LHS, RHS);
if (!ConstantDiffOrNone)
return std::nullopt;
})");
EXPECT_TRUE(!!Value);
LLVMContext Ctx;
- Optional<TensorSpec> Spec = getTensorSpecFromJSON(Ctx, *Value);
+ std::optional<TensorSpec> Spec = getTensorSpecFromJSON(Ctx, *Value);
EXPECT_TRUE(Spec);
EXPECT_EQ(*Spec, TensorSpec::createSpec<int32_t>("tensor_name", {1, 4}, 2));
}