Many llvm/IR/* files have already been migrated from llvm::Optional to std::optional by other contributors. This patch migrates most of the remaining llvm/IR files.
/// The maximum percentage profiling weights can deviate from the expected
/// values in order to be included in misexpect diagnostics.
- Optional<uint32_t> DiagnosticsMisExpectTolerance = 0;
+ std::optional<uint32_t> DiagnosticsMisExpectTolerance = 0;
/// The name of a file to use with \c .secure_log_unique directives.
std::string AsSecureLogFile;
// Parse misexpect tolerance argument value.
// Valid option values are integers in the range [0, 100)
-inline Expected<Optional<uint32_t>> parseToleranceOption(StringRef Arg) {
+static Expected<std::optional<uint32_t>> parseToleranceOption(StringRef Arg) {
uint32_t Val;
if (Arg.getAsInteger(10, Val))
return llvm::createStringError(llvm::inconvertibleErrorCode(),
auto &HeaderNode = Loop.Nodes[H];
const BlockT *Block = getBlock(HeaderNode);
IsIrrLoopHeader.set(Loop.Nodes[H].Index);
- Optional<uint64_t> HeaderWeight = Block->getIrrLoopHeaderWeight();
+ std::optional<uint64_t> HeaderWeight = Block->getIrrLoopHeaderWeight();
if (!HeaderWeight) {
LLVM_DEBUG(dbgs() << "Missing irr loop header metadata on "
<< getBlockName(HeaderNode) << "\n");
BlockFrequencyInfoImplBase::getBlockProfileCount(
F->getFunction(), getNode(&BB)))
OS << ", count = " << ProfileCount.value();
- if (Optional<uint64_t> IrrLoopHeaderWeight =
- BB.getIrrLoopHeaderWeight())
+ if (std::optional<uint64_t> IrrLoopHeaderWeight =
+ BB.getIrrLoopHeaderWeight())
OS << ", irr_loop_header_weight = " << IrrLoopHeaderWeight.value();
OS << "\n";
}
bool parseGlobalValue(Type *Ty, Constant *&C);
bool parseGlobalTypeAndValue(Constant *&V);
bool parseGlobalValueVector(SmallVectorImpl<Constant *> &Elts,
- Optional<unsigned> *InRangeOp = nullptr);
+ std::optional<unsigned> *InRangeOp = nullptr);
bool parseOptionalComdat(StringRef GlobalName, Comdat *&C);
bool parseSanitizer(GlobalVariable *GV);
bool parseMetadataAsValue(Value *&V, PerFunctionState &PFS);
using const_probability_iterator =
std::vector<BranchProbability>::const_iterator;
- Optional<uint64_t> IrrLoopHeaderWeight;
+ std::optional<uint64_t> IrrLoopHeaderWeight;
/// Keep track of the physical registers that are livein of the basicblock.
using LiveInVector = std::vector<RegisterMaskPair>;
/// Return the EHCatchret Symbol for this basic block.
MCSymbol *getEHCatchretSymbol() const;
- Optional<uint64_t> getIrrLoopHeaderWeight() const {
+ std::optional<uint64_t> getIrrLoopHeaderWeight() const {
return IrrLoopHeaderWeight;
}
/// This method can only be used on blocks that have a parent function.
bool isEntryBlock() const;
- Optional<uint64_t> getIrrLoopHeaderWeight() const;
+ std::optional<uint64_t> getIrrLoopHeaderWeight() const;
/// Returns true if the Order field of child Instructions is valid.
bool isInstrOrderValid() const {
#ifndef LLVM_IR_CONSTANTFOLD_H
#define LLVM_IR_CONSTANTFOLD_H
-#include "llvm/ADT/Optional.h"
#include "llvm/IR/InstrTypes.h"
+#include <optional>
namespace llvm {
template <typename T> class ArrayRef;
Constant *ConstantFoldCompareInstruction(CmpInst::Predicate Predicate,
Constant *C1, Constant *C2);
Constant *ConstantFoldGetElementPtr(Type *Ty, Constant *C, bool InBounds,
- Optional<unsigned> InRangeIndex,
+ std::optional<unsigned> InRangeIndex,
ArrayRef<Value *> Idxs);
} // End llvm namespace
/// Intersect the two ranges and return the result if it can be represented
/// exactly, otherwise return std::nullopt.
- Optional<ConstantRange> exactIntersectWith(const ConstantRange &CR) const;
+ std::optional<ConstantRange>
+ exactIntersectWith(const ConstantRange &CR) const;
/// Union the two ranges and return the result if it can be represented
/// exactly, otherwise return std::nullopt.
- Optional<ConstantRange> exactUnionWith(const ConstantRange &CR) const;
+ std::optional<ConstantRange> exactUnionWith(const ConstantRange &CR) const;
/// Return a new range representing the possible values resulting
/// from an application of the specified cast operator to this range. \p
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Constant.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
+#include <optional>
namespace llvm {
static Constant *
getGetElementPtr(Type *Ty, Constant *C, ArrayRef<Constant *> IdxList,
bool InBounds = false,
- Optional<unsigned> InRangeIndex = std::nullopt,
+ std::optional<unsigned> InRangeIndex = std::nullopt,
Type *OnlyIfReducedTy = nullptr) {
return getGetElementPtr(
Ty, C, makeArrayRef((Value *const *)IdxList.data(), IdxList.size()),
}
static Constant *
getGetElementPtr(Type *Ty, Constant *C, Constant *Idx, bool InBounds = false,
- Optional<unsigned> InRangeIndex = std::nullopt,
+ std::optional<unsigned> InRangeIndex = std::nullopt,
Type *OnlyIfReducedTy = nullptr) {
// This form of the function only exists to avoid ambiguous overload
// warnings about whether to convert Idx to ArrayRef<Constant *> or
static Constant *
getGetElementPtr(Type *Ty, Constant *C, ArrayRef<Value *> IdxList,
bool InBounds = false,
- Optional<unsigned> InRangeIndex = std::nullopt,
+ std::optional<unsigned> InRangeIndex = std::nullopt,
Type *OnlyIfReducedTy = nullptr);
/// Create an "inbounds" getelementptr. See the documentation for the
/// if index cannot be computed, e.g. because the type is not an aggregate.
/// ElemTy is updated to be the result element type and Offset to be the
/// residual offset.
- Optional<APInt> getGEPIndexForOffset(Type *&ElemTy, APInt &Offset) const;
+ std::optional<APInt> getGEPIndexForOffset(Type *&ElemTy, APInt &Offset) const;
/// Returns a StructLayout object, indicating the alignment of the
/// struct, its size, and the offsets of its fields.
#include "llvm-c/Types.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include <cstdint>
#include <functional>
#include <iterator>
+#include <optional>
#include <string>
namespace llvm {
///
/// Entry count is the number of times the function was executed.
/// When AllowSynthetic is false, only pgo_data will be returned.
- Optional<ProfileCount> getEntryCount(bool AllowSynthetic = false) const;
+ std::optional<ProfileCount> getEntryCount(bool AllowSynthetic = false) const;
/// Return true if the function is annotated with profile data.
///
void setSectionPrefix(StringRef Prefix);
/// Get the section prefix for this function.
- Optional<StringRef> getSectionPrefix() const;
+ std::optional<StringRef> getSectionPrefix() const;
/// hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm
/// to use during code generation.
#define LLVM_IR_GCSTRATEGY_H
#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/Support/Registry.h"
+#include <optional>
#include <string>
namespace llvm {
/// pointers to GC managed locations and false for pointers to non-GC
/// managed locations. Note a GCStrategy can always return 'None' (i.e. an
/// empty optional indicating it can't reliably distinguish.
- virtual Optional<bool> isGCManagedPointer(const Type *Ty) const {
+ virtual std::optional<bool> isGCManagedPointer(const Type *Ty) const {
return std::nullopt;
}
///@}
bool isAbsoluteSymbolRef() const;
/// If this is an absolute symbol reference, returns the range of the symbol,
- /// otherwise returns None.
- Optional<ConstantRange> getAbsoluteSymbolRange() const;
+ /// otherwise returns std::nullopt.
+ std::optional<ConstantRange> getAbsoluteSymbolRange() const;
/// This method unlinks 'this' from the containing module, but does not delete
/// it.
Constant *Initializer, const Twine &Name = "",
GlobalVariable *InsertBefore = nullptr,
ThreadLocalMode = NotThreadLocal,
- Optional<unsigned> AddressSpace = std::nullopt,
+ std::optional<unsigned> AddressSpace = std::nullopt,
bool isExternallyInitialized = false);
GlobalVariable(const GlobalVariable &) = delete;
GlobalVariable &operator=(const GlobalVariable &) = delete;
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/Support/TypeSize.h"
+#include <optional>
#include <string>
namespace llvm {
// returns the declaration with the same signature and remangled name.
// An existing GlobalValue with the wanted name but with a wrong prototype
// or of the wrong kind will be renamed by adding ".renamed" to the name.
- llvm::Optional<Function*> remangleIntrinsicFunction(Function *F);
+ std::optional<Function *> remangleIntrinsicFunction(Function *F);
} // End Intrinsic namespace
#define LLVM_IR_LLVMCONTEXT_H
#include "llvm-c/Types.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/IR/DiagnosticHandler.h"
#include "llvm/Support/CBindingWrapping.h"
#include <cstdint>
#include <memory>
+#include <optional>
#include <string>
namespace llvm {
bool getMisExpectWarningRequested() const;
void setMisExpectWarningRequested(bool Requested);
- void setDiagnosticsMisExpectTolerance(Optional<uint32_t> Tolerance);
+ void setDiagnosticsMisExpectTolerance(std::optional<uint32_t> Tolerance);
uint32_t getDiagnosticsMisExpectTolerance() const;
/// Return the minimum hotness value a diagnostic would need in order
#define LLVM_IR_MODULE_H
#include "llvm-c/Types.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/FMF.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include <cstddef>
+#include <optional>
namespace llvm {
/// Returns the offset of the index with an inrange attachment, or
/// std::nullopt if none.
- Optional<unsigned> getInRangeIndex() const {
+ std::optional<unsigned> getInRangeIndex() const {
if (SubclassOptionalData >> 1 == 0)
return std::nullopt;
return (SubclassOptionalData >> 1) - 1;
#ifndef LLVM_IR_PSEUDOPROBE_H
#define LLVM_IR_PSEUDOPROBE_H
-#include "llvm/ADT/Optional.h"
#include <cassert>
#include <cstdint>
#include <limits>
+#include <optional>
namespace llvm {
return Flags & (uint32_t)PseudoProbeAttributes::Sentinel;
}
-Optional<PseudoProbe> extractProbe(const Instruction &Inst);
+std::optional<PseudoProbe> extractProbe(const Instruction &Inst);
void setProbeDistributionFactor(Instruction &Inst, float Factor);
} // end namespace llvm
#ifndef LLVM_IR_STATEPOINT_H
#define LLVM_IR_STATEPOINT_H
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/IR/TrackingMDRef.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include <cstddef>
#include <iterator>
#include <mutex>
+#include <optional>
#include <type_traits>
#include <utility>
using ExtraData = typename Config::ExtraData;
MapT Map;
- Optional<MDMapT> MDMap;
+ std::optional<MDMapT> MDMap;
ExtraData Data;
public:
MDMap.emplace();
return *MDMap;
}
- Optional<MDMapT> &getMDMap() { return MDMap; }
+ std::optional<MDMapT> &getMDMap() { return MDMap; }
/// Get the mapped metadata, if it's in the map.
- Optional<Metadata *> getMappedMD(const Metadata *MD) const {
+ std::optional<Metadata *> getMappedMD(const Metadata *MD) const {
if (!MDMap)
return std::nullopt;
auto Where = MDMap->find(MD);
/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly casted by the getelementptr.
Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
- Type *ResultTy, Optional<unsigned> InRangeIndex,
+ Type *ResultTy, std::optional<unsigned> InRangeIndex,
const DataLayout &DL, const TargetLibraryInfo *TLI) {
Type *IntIdxTy = DL.getIndexType(ResultTy);
Type *IntIdxScalarTy = IntIdxTy->getScalarType();
// Preserve the inrange index from the innermost GEP if possible. We must
// have calculated the same indices up to and including the inrange index.
- Optional<unsigned> InRangeIndex;
- if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
+ std::optional<unsigned> InRangeIndex;
+ if (std::optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
if (SrcElemTy == InnermostGEP->getSourceElementType() &&
NewIdxs.size() > *LastIRIndex) {
InRangeIndex = LastIRIndex;
return true;
}
- Optional<unsigned> InRangeOp;
+ std::optional<unsigned> InRangeOp;
if (parseGlobalValueVector(
Elts, Opc == Instruction::GetElementPtr ? &InRangeOp : nullptr) ||
parseToken(lltok::rparen, "expected ')' in constantexpr"))
/// ::= /*empty*/
/// ::= [inrange] TypeAndValue (',' [inrange] TypeAndValue)*
bool LLParser::parseGlobalValueVector(SmallVectorImpl<Constant *> &Elts,
- Optional<unsigned> *InRangeOp) {
+ std::optional<unsigned> *InRangeOp) {
// Empty list.
if (Lex.getKind() == lltok::rbrace ||
Lex.getKind() == lltok::rsquare ||
return makeArrayRef(getTrailingObjects<unsigned>(), NumOperands);
}
- Optional<unsigned> getInRangeIndex() const {
+ std::optional<unsigned> getInRangeIndex() const {
assert(Opcode == Instruction::GetElementPtr);
if (Extra == (unsigned)-1)
return std::nullopt;
Code = bitc::CST_CODE_CE_GEP;
const auto *GO = cast<GEPOperator>(C);
Record.push_back(VE.getTypeID(GO->getSourceElementType()));
- if (Optional<unsigned> Idx = GO->getInRangeIndex()) {
+ if (std::optional<unsigned> Idx = GO->getInRangeIndex()) {
Code = bitc::CST_CODE_CE_GEP_WITH_INRANGE_INDEX;
Record.push_back((*Idx << 1) | GO->isInBounds());
} else if (GO->isInBounds())
// We don't want to proceed further for cold functions
// or functions of unknown hotness. Lukewarm functions have no prefix.
- Optional<StringRef> SectionPrefix = MF.getFunction().getSectionPrefix();
+ std::optional<StringRef> SectionPrefix = MF.getFunction().getSectionPrefix();
if (SectionPrefix && (SectionPrefix.value().equals("unlikely") ||
SectionPrefix.value().equals("unknown"))) {
return false;
bool HasPrefix = false;
if (const auto *F = dyn_cast<Function>(GO)) {
- if (Optional<StringRef> Prefix = F->getSectionPrefix()) {
+ if (std::optional<StringRef> Prefix = F->getSectionPrefix()) {
raw_svector_ostream(Name) << '.' << *Prefix;
HasPrefix = true;
}
StringRef COMDATSymName = Sym->getName();
if (const auto *F = dyn_cast<Function>(GO))
- if (Optional<StringRef> Prefix = F->getSectionPrefix())
+ if (std::optional<StringRef> Prefix = F->getSectionPrefix())
raw_svector_ostream(Name) << '$' << *Prefix;
// Append "$symbol" to the section name *before* IR-level mangling is
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
static_cast<CmpInst::Predicate>(CE->getPredicate()));
Out << " (";
- Optional<unsigned> InRangeOp;
+ std::optional<unsigned> InRangeOp;
if (const GEPOperator *GEP = dyn_cast<GEPOperator>(CE)) {
WriterCtx.TypePrinter->print(GEP->getSourceElementType(), Out);
Out << ", ";
void printAPInt(StringRef Name, const APInt &Int, bool IsUnsigned,
bool ShouldSkipZero);
void printBool(StringRef Name, bool Value,
- Optional<bool> Default = std::nullopt);
+ std::optional<bool> Default = std::nullopt);
void printDIFlags(StringRef Name, DINode::DIFlags Flags);
void printDISPFlags(StringRef Name, DISubprogram::DISPFlags Flags);
template <class IntTy, class Stringifier>
}
void MDFieldPrinter::printBool(StringRef Name, bool Value,
- Optional<bool> Default) {
+ std::optional<bool> Default) {
if (Default && Value == *Default)
return;
Out << FS << Name << ": " << (Value ? "true" : "false");
return dyn_cast<LandingPadInst>(getFirstNonPHI());
}
-Optional<uint64_t> BasicBlock::getIrrLoopHeaderWeight() const {
+std::optional<uint64_t> BasicBlock::getIrrLoopHeaderWeight() const {
const Instruction *TI = getTerminator();
if (MDNode *MDIrrLoopHeader =
TI->getMetadata(LLVMContext::MD_irr_loop)) {
MDString *MDName = cast<MDString>(MDIrrLoopHeader->getOperand(0));
if (MDName->getString().equals("loop_header_weight")) {
auto *CI = mdconst::extract<ConstantInt>(MDIrrLoopHeader->getOperand(1));
- return Optional<uint64_t>(CI->getValue().getZExtValue());
+ return std::optional<uint64_t>(CI->getValue().getZExtValue());
}
}
return std::nullopt;
UsesMetadata = false;
}
- Optional<bool> isGCManagedPointer(const Type *Ty) const override {
+ std::optional<bool> isGCManagedPointer(const Type *Ty) const override {
// Method is only valid on pointer typed values.
const PointerType *PT = cast<PointerType>(Ty);
// For the sake of this example GC, we arbitrarily pick addrspace(1) as our
UsesMetadata = false;
}
- Optional<bool> isGCManagedPointer(const Type *Ty) const override {
+ std::optional<bool> isGCManagedPointer(const Type *Ty) const override {
// Method is only valid on pointer typed values.
const PointerType *PT = cast<PointerType>(Ty);
// We pick addrspace(1) as our GC managed heap.
// The combined GEP normally inherits its index inrange attribute from
// the inner GEP, but if the inner GEP's last index was adjusted by the
// outer GEP, any inbounds attribute on that index is invalidated.
- Optional<unsigned> IRIndex = GEP->getInRangeIndex();
+ std::optional<unsigned> IRIndex = GEP->getInRangeIndex();
if (IRIndex && *IRIndex == GEP->getNumIndices() - 1)
IRIndex = std::nullopt;
Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
bool InBounds,
- Optional<unsigned> InRangeIndex,
+ std::optional<unsigned> InRangeIndex,
ArrayRef<Value *> Idxs) {
if (Idxs.empty()) return C;
return ConstantRange(std::move(L), std::move(U));
}
-Optional<ConstantRange>
+std::optional<ConstantRange>
ConstantRange::exactIntersectWith(const ConstantRange &CR) const {
// TODO: This can be implemented more efficiently.
ConstantRange Result = intersectWith(CR);
return std::nullopt;
}
-Optional<ConstantRange>
+std::optional<ConstantRange>
ConstantRange::exactUnionWith(const ConstantRange &CR) const {
// TODO: This can be implemented more efficiently.
ConstantRange Result = unionWith(CR);
Constant *ConstantExpr::getGetElementPtr(Type *Ty, Constant *C,
ArrayRef<Value *> Idxs, bool InBounds,
- Optional<unsigned> InRangeIndex,
+ std::optional<unsigned> InRangeIndex,
Type *OnlyIfReducedTy) {
PointerType *OrigPtrTy = cast<PointerType>(C->getType()->getScalarType());
assert(Ty && "Must specify element type");
return Index;
}
-Optional<APInt> DataLayout::getGEPIndexForOffset(Type *&ElemTy,
- APInt &Offset) const {
+std::optional<APInt> DataLayout::getGEPIndexForOffset(Type *&ElemTy,
+ APInt &Offset) const {
if (auto *ArrTy = dyn_cast<ArrayType>(ElemTy)) {
ElemTy = ArrTy->getElementType();
return getElementIndex(getTypeAllocSize(ElemTy), Offset);
SmallVector<APInt> Indices;
Indices.push_back(getElementIndex(getTypeAllocSize(ElemTy), Offset));
while (Offset != 0) {
- Optional<APInt> Index = getGEPIndexForOffset(ElemTy, Offset);
+ std::optional<APInt> Index = getGEPIndexForOffset(ElemTy, Offset);
if (!Index)
break;
Indices.push_back(*Index);
return true;
}
-Optional<Function *> Intrinsic::remangleIntrinsicFunction(Function *F) {
+std::optional<Function *> Intrinsic::remangleIntrinsicFunction(Function *F) {
SmallVector<Type *, 4> ArgTys;
if (!getIntrinsicSignature(F, ArgTys))
return std::nullopt;
setEntryCount(ProfileCount(Count, Type), Imports);
}
-Optional<ProfileCount> Function::getEntryCount(bool AllowSynthetic) const {
+std::optional<ProfileCount> Function::getEntryCount(bool AllowSynthetic) const {
MDNode *MD = getMetadata(LLVMContext::MD_prof);
if (MD && MD->getOperand(0))
if (MDString *MDS = dyn_cast<MDString>(MD->getOperand(0))) {
MDB.createFunctionSectionPrefix(Prefix));
}
-Optional<StringRef> Function::getSectionPrefix() const {
+std::optional<StringRef> Function::getSectionPrefix() const {
if (MDNode *MD = getMetadata(LLVMContext::MD_section_prefix)) {
assert(cast<MDString>(MD->getOperand(0))
->getString()
return GO->getMetadata(LLVMContext::MD_absolute_symbol);
}
-Optional<ConstantRange> GlobalValue::getAbsoluteSymbolRange() const {
+std::optional<ConstantRange> GlobalValue::getAbsoluteSymbolRange() const {
auto *GO = dyn_cast<GlobalObject>(this);
if (!GO)
return std::nullopt;
LinkageTypes Link, Constant *InitVal,
const Twine &Name, GlobalVariable *Before,
ThreadLocalMode TLMode,
- Optional<unsigned> AddressSpace,
+ std::optional<unsigned> AddressSpace,
bool isExternallyInitialized)
: GlobalObject(Ty, Value::GlobalVariableVal,
OperandTraits<GlobalVariable>::op_begin(this),
return pImpl->DiagnosticsHotnessThreshold.value_or(UINT64_MAX);
}
void LLVMContext::setDiagnosticsMisExpectTolerance(
- Optional<uint32_t> Tolerance) {
+ std::optional<uint32_t> Tolerance) {
pImpl->DiagnosticsMisExpectTolerance = Tolerance;
}
uint32_t LLVMContext::getDiagnosticsMisExpectTolerance() const {
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/Hashing.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
/// The percentage of difference between profiling branch weights and
/// llvm.expect branch weights to tolerate when emiting MisExpect diagnostics
- Optional<uint32_t> DiagnosticsMisExpectTolerance = 0;
+ std::optional<uint32_t> DiagnosticsMisExpectTolerance = 0;
bool MisExpectWarningRequested = false;
/// The specialized remark streamer used by LLVM's OptimizationRemarkEmitter.
#include "llvm/IR/Metadata.def"
// Optional map for looking up composite types by identifier.
- Optional<DenseMap<const MDString *, DICompositeType *>> DITypeMap;
+ std::optional<DenseMap<const MDString *, DICompositeType *>> DITypeMap;
// MDNodes may be uniqued or not uniqued. When they're not uniqued, they
// aren't in the MDNodeSet, but they're still shared between objects, so no
void setOpaquePointers(bool OP);
private:
- Optional<bool> OpaquePointers;
+ std::optional<bool> OpaquePointers;
};
} // end namespace llvm
#include "llvm/IR/Module.h"
#include "SymbolTableListTraitsImpl.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/IR/PassManager.h"
#include "llvm/ADT/DenseMapInfo.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/IR/PassManagerImpl.h"
#include <optional>
namespace llvm {
-Optional<PseudoProbe> extractProbeFromDiscriminator(const Instruction &Inst) {
+std::optional<PseudoProbe>
+extractProbeFromDiscriminator(const Instruction &Inst) {
assert(isa<CallBase>(&Inst) && !isa<IntrinsicInst>(&Inst) &&
"Only call instructions should have pseudo probe encodes as their "
"Dwarf discriminators");
return std::nullopt;
}
-Optional<PseudoProbe> extractProbe(const Instruction &Inst) {
+std::optional<PseudoProbe> extractProbe(const Instruction &Inst) {
if (const auto *II = dyn_cast<PseudoProbeInst>(&Inst)) {
PseudoProbe Probe;
Probe.Id = II->getIndex()->getZExtValue();
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
if (N->getOpcode() != ISD::TargetGlobalAddress)
return TM.getCodeModel() == CodeModel::Small;
- Optional<ConstantRange> CR =
+ std::optional<ConstantRange> CR =
cast<GlobalAddressSDNode>(N)->getGlobal()->getAbsoluteSymbolRange();
if (!CR)
return TM.getCodeModel() == CodeModel::Small;
// Check that the global's range fits into VT.
auto *GA = cast<GlobalAddressSDNode>(N.getOperand(0));
- Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
+ std::optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
if (!CR || CR->getUnsignedMax().uge(1ull << VT.getSizeInBits()))
return false;
if (!GA)
return false;
- Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
+ std::optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
if (!CR)
return Width == 32 && TM.getCodeModel() == CodeModel::Small;
// Absolute symbols can be referenced directly.
if (GV) {
- if (Optional<ConstantRange> CR = GV->getAbsoluteSymbolRange()) {
+ if (std::optional<ConstantRange> CR = GV->getAbsoluteSymbolRange()) {
// See if we can use the 8-bit immediate form. Note that some instructions
// will sign extend the immediate operand, so to be conservative we only
// accept the range [0,128).
ErrorOr<uint64_t> SampleProfileLoader::getProbeWeight(const Instruction &Inst) {
assert(FunctionSamples::ProfileIsProbeBased &&
"Profile is not pseudo probe based");
- Optional<PseudoProbe> Probe = extractProbe(Inst);
+ std::optional<PseudoProbe> Probe = extractProbe(Inst);
// Ignore the non-probe instruction. If none of the instruction in the BB is
// probe, we choose to infer the BB's weight.
if (!Probe)
const FunctionSamples *
SampleProfileLoader::findFunctionSamples(const Instruction &Inst) const {
if (FunctionSamples::ProfileIsProbeBased) {
- Optional<PseudoProbe> Probe = extractProbe(Inst);
+ std::optional<PseudoProbe> Probe = extractProbe(Inst);
if (!Probe)
return nullptr;
}
// aggregation of duplication.
if (Candidate.CallsiteDistribution < 1) {
for (auto &I : IFI.InlinedCallSites) {
- if (Optional<PseudoProbe> Probe = extractProbe(*I))
+ if (std::optional<PseudoProbe> Probe = extractProbe(*I))
setProbeDistributionFactor(*I, Probe->Factor *
Candidate.CallsiteDistribution);
}
return false;
float Factor = 1.0;
- if (Optional<PseudoProbe> Probe = extractProbe(*CB))
+ if (std::optional<PseudoProbe> Probe = extractProbe(*CB))
Factor = Probe->Factor;
uint64_t CallsiteCount =
// Prorate the callsite counts based on the pre-ICP distribution
// factor to reflect what is already done to the callsite before
// ICP, such as calliste cloning.
- if (Optional<PseudoProbe> Probe = extractProbe(I)) {
+ if (std::optional<PseudoProbe> Probe = extractProbe(I)) {
if (Probe->Factor < 1)
T = SampleRecord::adjustCallTargets(T.get(), Probe->Factor);
}
void PseudoProbeVerifier::collectProbeFactors(const BasicBlock *Block,
ProbeFactorMap &ProbeFactors) {
for (const auto &I : *Block) {
- if (Optional<PseudoProbe> Probe = extractProbe(I)) {
+ if (std::optional<PseudoProbe> Probe = extractProbe(I)) {
uint64_t Hash = computeCallStackHash(I);
ProbeFactors[{Probe->Id, Hash}] += Probe->Factor;
}
ProbeFactorMap ProbeFactors;
for (auto &Block : F) {
for (auto &I : Block) {
- if (Optional<PseudoProbe> Probe = extractProbe(I)) {
+ if (std::optional<PseudoProbe> Probe = extractProbe(I)) {
uint64_t Hash = computeCallStackHash(I);
ProbeFactors[{Probe->Id, Hash}] += BBProfileCount(&Block);
}
// Fix up over-counted probes.
for (auto &Block : F) {
for (auto &I : Block) {
- if (Optional<PseudoProbe> Probe = extractProbe(I)) {
+ if (std::optional<PseudoProbe> Probe = extractProbe(I)) {
uint64_t Hash = computeCallStackHash(I);
float Sum = ProbeFactors[{Probe->Id, Hash}];
if (Sum != 0)
Type *Ty = V1->getType();
Value *NewV = V1;
- Optional<ConstantRange> CR = CR1.exactUnionWith(CR2);
+ std::optional<ConstantRange> CR = CR1.exactUnionWith(CR2);
if (!CR) {
if (!(ICmp1->hasOneUse() && ICmp2->hasOneUse()) || CR1.isWrappedSet() ||
CR2.isWrappedSet())
// Given what we're doing here and the semantics of guards, it would
// be correct to use a subset intersection, but that may be too
// aggressive in cases we care about.
- if (Optional<ConstantRange> Intersect = CR0.exactIntersectWith(CR1)) {
+ if (std::optional<ConstantRange> Intersect =
+ CR0.exactIntersectWith(CR1)) {
APInt NewRHSAP;
CmpInst::Predicate Pred;
if (Intersect->getEquivalentICmp(Pred, NewRHSAP)) {
const MutableValue *V = this;
while (const auto *Agg = V->Val.dyn_cast<MutableAggregate *>()) {
Type *AggTy = Agg->Ty;
- Optional<APInt> Index = DL.getGEPIndexForOffset(AggTy, Offset);
+ std::optional<APInt> Index = DL.getGEPIndexForOffset(AggTy, Offset);
if (!Index || Index->uge(Agg->Elements.size()) ||
!TypeSize::isKnownLE(TySize, DL.getTypeStoreSize(AggTy)))
return nullptr;
MutableAggregate *Agg = MV->Val.get<MutableAggregate *>();
Type *AggTy = Agg->Ty;
- Optional<APInt> Index = DL.getGEPIndexForOffset(AggTy, Offset);
+ std::optional<APInt> Index = DL.getGEPIndexForOffset(AggTy, Offset);
if (!Index || Index->uge(Agg->Elements.size()) ||
!TypeSize::isKnownLE(TySize, DL.getTypeStoreSize(AggTy)))
return false;
if (!Op)
return nullptr;
- if (Optional<Metadata *> MappedOp = M.getVM().getMappedMD(Op))
+ if (std::optional<Metadata *> MappedOp = M.getVM().getMappedMD(Op))
return *MappedOp;
if (isa<MDString>(Op))
Optional<Metadata *> Mapper::mapSimpleMetadata(const Metadata *MD) {
// If the value already exists in the map, use it.
- if (Optional<Metadata *> NewMD = getVM().getMappedMD(MD))
+ if (std::optional<Metadata *> NewMD = getVM().getMappedMD(MD))
return *NewMD;
if (isa<MDString>(MD))
SrcMBB.isInlineAsmBrIndirectTarget());
// FIXME: This is not serialized
- if (Optional<uint64_t> Weight = SrcMBB.getIrrLoopHeaderWeight())
+ if (std::optional<uint64_t> Weight = SrcMBB.getIrrLoopHeaderWeight())
DstMBB->setIrrLoopHeaderWeight(*Weight);
}
ConstantRange SignedCR = OpFn(CR1, CR2, ConstantRange::Signed);
TestRange(SignedCR, Elems, PreferSmallestNonFullSigned, {CR1, CR2});
- Optional<ConstantRange> ExactCR = ExactOpFn(CR1, CR2);
+ std::optional<ConstantRange> ExactCR = ExactOpFn(CR1, CR2);
if (SmallestCR.isSizeLargerThan(Elems.count())) {
EXPECT_TRUE(!ExactCR);
} else {