// This special-casing introduces fewer adaptor passes. If we need to add
// module passes after function passes, we could change the implementation
// to accommodate that.
- Optional<bool> AddingFunctionPasses;
+ std::optional<bool> AddingFunctionPasses;
};
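For reviewers following the change, the idiom mapping is roughly the sketch below; it is illustrative only and not part of the patch (the variable reuses the member declared above, and <optional> is assumed to be included, as the later hunks do):
  std::optional<bool> AddingFunctionPasses;   // was llvm::Optional<bool>
  AddingFunctionPasses = std::nullopt;        // was None
  if (AddingFunctionPasses.has_value())       // hasValue() -> has_value()
    (void)*AddingFunctionPasses;              // getValue() -> value(); operator* unchanged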
// Function object to maintain state while adding codegen machine passes.
bool getEnableDebugEntryValues();
bool getValueTrackingVariableLocations();
-Optional<bool> getExplicitValueTrackingVariableLocations();
+std::optional<bool> getExplicitValueTrackingVariableLocations();
bool getForceDwarfFrameSection();
#ifndef LLVM_CODEGEN_DEBUGHANDLERBASE_H
#define LLVM_CODEGEN_DEBUGHANDLERBASE_H
-#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/AsmPrinterHandler.h"
#include "llvm/CodeGen/DbgEntityHistoryCalculator.h"
#include "llvm/CodeGen/LexicalScopes.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
+#include <optional>
namespace llvm {
SmallVector<int64_t, 1> LoadChain;
/// Present if the location is part of a larger variable.
- llvm::Optional<llvm::DIExpression::FragmentInfo> FragmentInfo;
+ std::optional<llvm::DIExpression::FragmentInfo> FragmentInfo;
/// Extract a VariableLocation from a MachineInstr.
/// This will only work if Instruction is a debug value instruction
/// and the associated DIExpression is in one of the supported forms.
/// If these requirements are not met, the returned Optional will not
/// have a value.
- static Optional<DbgVariableLocation>
+ static std::optional<DbgVariableLocation>
extractFromMachineInstruction(const MachineInstr &Instruction);
};
void profileMBBOpcode(GISelInstProfileBuilder &B, unsigned Opc) const;
void profileEverything(unsigned Opc, ArrayRef<DstOp> DstOps,
- ArrayRef<SrcOp> SrcOps, Optional<unsigned> Flags,
+ ArrayRef<SrcOp> SrcOps, std::optional<unsigned> Flags,
GISelInstProfileBuilder &B) const;
// Takes a MachineInstrBuilder and inserts it into the CSEMap using the
// Unhide buildInstr
MachineInstrBuilder
buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps, ArrayRef<SrcOp> SrcOps,
- Optional<unsigned> Flag = std::nullopt) override;
+ std::optional<unsigned> Flag = std::nullopt) override;
// Bring in the other overload from the base class.
using MachineIRBuilder::buildConstant;
/// Transform fp_instr(cst) to constant result of the fp operation.
bool matchCombineConstantFoldFpUnary(MachineInstr &MI,
- Optional<APFloat> &Cst);
+ std::optional<APFloat> &Cst);
void applyCombineConstantFoldFpUnary(MachineInstr &MI,
- Optional<APFloat> &Cst);
+ std::optional<APFloat> &Cst);
/// Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
bool matchCombineI2PToP2I(MachineInstr &MI, Register &Reg);
/// \param [in] Root - The search root.
///
/// \returns The Registers found during the search.
- Optional<SmallVector<Register, 8>>
+ std::optional<SmallVector<Register, 8>>
findCandidatesForLoadOrCombine(const MachineInstr *Root) const;
/// Helper function for matchLoadOrCombine.
///
/// \returns On success, a 3-tuple containing lowest-index load found, the
/// lowest index, and the last load in the sequence.
- Optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
+ std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
findLoadOffsetsForLoadOrCombine(
SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
const SmallVector<Register, 8> &RegsToVisit,
#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineFunction.h"
#include <cstdint>
#include <functional>
#include <initializer_list>
+#include <optional>
#include <vector>
namespace llvm {
protected:
using ComplexRendererFns =
- Optional<SmallVector<std::function<void(MachineInstrBuilder &)>, 4>>;
+ std::optional<SmallVector<std::function<void(MachineInstrBuilder &)>, 4>>;
using RecordedMIVector = SmallVector<MachineInstr *, 4>;
using NewMIVector = SmallVector<MachineInstrBuilder, 4>;
/// and its callees rely upon.
Register findValueFromDefImpl(Register DefReg, unsigned StartBit,
unsigned Size) {
- Optional<DefinitionAndSourceRegister> DefSrcReg =
+ std::optional<DefinitionAndSourceRegister> DefSrcReg =
getDefSrcRegIgnoringCopies(DefReg, MRI);
MachineInstr *Def = DefSrcReg->MI;
DefReg = DefSrcReg->Reg;
}
template <typename ConstT>
-inline Optional<ConstT> matchConstant(Register, const MachineRegisterInfo &);
+inline std::optional<ConstT> matchConstant(Register,
+ const MachineRegisterInfo &);
template <>
-inline Optional<APInt> matchConstant(Register Reg,
- const MachineRegisterInfo &MRI) {
+inline std::optional<APInt> matchConstant(Register Reg,
+ const MachineRegisterInfo &MRI) {
return getIConstantVRegVal(Reg, MRI);
}
template <>
-inline Optional<int64_t> matchConstant(Register Reg,
- const MachineRegisterInfo &MRI) {
+inline std::optional<int64_t> matchConstant(Register Reg,
+ const MachineRegisterInfo &MRI) {
return getIConstantVRegSExtVal(Reg, MRI);
}
}
template <typename ConstT>
-inline Optional<ConstT> matchConstantSplat(Register,
- const MachineRegisterInfo &);
+inline std::optional<ConstT> matchConstantSplat(Register,
+ const MachineRegisterInfo &);
template <>
-inline Optional<APInt> matchConstantSplat(Register Reg,
- const MachineRegisterInfo &MRI) {
+inline std::optional<APInt> matchConstantSplat(Register Reg,
+ const MachineRegisterInfo &MRI) {
return getIConstantSplatVal(Reg, MRI);
}
template <>
-inline Optional<int64_t> matchConstantSplat(Register Reg,
- const MachineRegisterInfo &MRI) {
+inline std::optional<int64_t>
+matchConstantSplat(Register Reg, const MachineRegisterInfo &MRI) {
return getIConstantSplatSExtVal(Reg, MRI);
}
}
struct GCstAndRegMatch {
- Optional<ValueAndVReg> &ValReg;
- GCstAndRegMatch(Optional<ValueAndVReg> &ValReg) : ValReg(ValReg) {}
+ std::optional<ValueAndVReg> &ValReg;
+ GCstAndRegMatch(std::optional<ValueAndVReg> &ValReg) : ValReg(ValReg) {}
bool match(const MachineRegisterInfo &MRI, Register Reg) {
ValReg = getIConstantVRegValWithLookThrough(Reg, MRI);
return ValReg ? true : false;
}
};
-inline GCstAndRegMatch m_GCst(Optional<ValueAndVReg> &ValReg) {
+inline GCstAndRegMatch m_GCst(std::optional<ValueAndVReg> &ValReg) {
return GCstAndRegMatch(ValReg);
}
struct GFCstAndRegMatch {
- Optional<FPValueAndVReg> &FPValReg;
- GFCstAndRegMatch(Optional<FPValueAndVReg> &FPValReg) : FPValReg(FPValReg) {}
+ std::optional<FPValueAndVReg> &FPValReg;
+ GFCstAndRegMatch(std::optional<FPValueAndVReg> &FPValReg)
+ : FPValReg(FPValReg) {}
bool match(const MachineRegisterInfo &MRI, Register Reg) {
FPValReg = getFConstantVRegValWithLookThrough(Reg, MRI);
return FPValReg ? true : false;
}
};
-inline GFCstAndRegMatch m_GFCst(Optional<FPValueAndVReg> &FPValReg) {
+inline GFCstAndRegMatch m_GFCst(std::optional<FPValueAndVReg> &FPValReg) {
return GFCstAndRegMatch(FPValReg);
}
struct GFCstOrSplatGFCstMatch {
- Optional<FPValueAndVReg> &FPValReg;
- GFCstOrSplatGFCstMatch(Optional<FPValueAndVReg> &FPValReg)
+ std::optional<FPValueAndVReg> &FPValReg;
+ GFCstOrSplatGFCstMatch(std::optional<FPValueAndVReg> &FPValReg)
: FPValReg(FPValReg) {}
bool match(const MachineRegisterInfo &MRI, Register Reg) {
return (FPValReg = getFConstantSplat(Reg, MRI)) ||
};
inline GFCstOrSplatGFCstMatch
-m_GFCstOrSplat(Optional<FPValueAndVReg> &FPValReg) {
+m_GFCstOrSplat(std::optional<FPValueAndVReg> &FPValReg) {
return GFCstOrSplatGFCstMatch(FPValReg);
}
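A usage sketch for the updated matchers, assuming a Register Reg and a MachineRegisterInfo MRI are in scope (this mirrors the call sites later in this patch):
  std::optional<ValueAndVReg> ValReg;
  if (mi_match(Reg, MRI, m_GCst(ValReg))) {
    const APInt &C = ValReg->Value; // populated only on a successful match
    (void)C;
  }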
/// type as \p Op0 or \p Op0 itself.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- Optional<MachineInstrBuilder> materializePtrAdd(Register &Res, Register Op0,
- const LLT ValueTy,
- uint64_t Value);
+ std::optional<MachineInstrBuilder> materializePtrAdd(Register &Res,
+ Register Op0,
+ const LLT ValueTy,
+ uint64_t Value);
/// Build and insert \p Res = G_PTRMASK \p Op0, \p Op1
MachineInstrBuilder buildPtrMask(const DstOp &Res, const SrcOp &Op0,
/// Build and insert \p Res = G_FPEXT \p Op
MachineInstrBuilder buildFPExt(const DstOp &Res, const SrcOp &Op,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_FPEXT, {Res}, {Op}, Flags);
}
/// \pre \p Res must be smaller than \p Op
///
/// \return The newly created instruction.
- MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op,
- Optional<unsigned> Flags = std::nullopt);
+ MachineInstrBuilder
+ buildFPTrunc(const DstOp &Res, const SrcOp &Op,
+ std::optional<unsigned> Flags = std::nullopt);
/// Build and insert \p Res = G_TRUNC \p Op
///
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res,
const SrcOp &Op0, const SrcOp &Op1,
- Optional<unsigned> Flags = std::nullopt);
+ std::optional<unsigned> Flags = std::nullopt);
/// Build and insert a \p Res = G_SELECT \p Tst, \p Op0, \p Op1
///
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst,
const SrcOp &Op0, const SrcOp &Op1,
- Optional<unsigned> Flags = std::nullopt);
+ std::optional<unsigned> Flags = std::nullopt);
/// Build and insert \p Res = G_INSERT_VECTOR_ELT \p Val,
/// \p Elt, \p Idx
MachineInstrBuilder buildAdd(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_ADD, {Dst}, {Src0, Src1}, Flags);
}
MachineInstrBuilder buildSub(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_SUB, {Dst}, {Src0, Src1}, Flags);
}
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildMul(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_MUL, {Dst}, {Src0, Src1}, Flags);
}
MachineInstrBuilder buildUMulH(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_UMULH, {Dst}, {Src0, Src1}, Flags);
}
MachineInstrBuilder buildSMulH(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_SMULH, {Dst}, {Src0, Src1}, Flags);
}
/// Build and insert \p Res = G_UREM \p Op0, \p Op1
MachineInstrBuilder buildURem(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_UREM, {Dst}, {Src0, Src1}, Flags);
}
MachineInstrBuilder buildFMul(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_FMUL, {Dst}, {Src0, Src1}, Flags);
}
- MachineInstrBuilder buildFMinNum(const DstOp &Dst, const SrcOp &Src0,
- const SrcOp &Src1,
- Optional<unsigned> Flags = std::nullopt) {
+ MachineInstrBuilder
+ buildFMinNum(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1,
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_FMINNUM, {Dst}, {Src0, Src1}, Flags);
}
- MachineInstrBuilder buildFMaxNum(const DstOp &Dst, const SrcOp &Src0,
- const SrcOp &Src1,
- Optional<unsigned> Flags = std::nullopt) {
+ MachineInstrBuilder
+ buildFMaxNum(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1,
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_FMAXNUM, {Dst}, {Src0, Src1}, Flags);
}
MachineInstrBuilder
buildFMinNumIEEE(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_FMINNUM_IEEE, {Dst}, {Src0, Src1}, Flags);
}
MachineInstrBuilder
buildFMaxNumIEEE(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_FMAXNUM_IEEE, {Dst}, {Src0, Src1}, Flags);
}
MachineInstrBuilder buildShl(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_SHL, {Dst}, {Src0, Src1}, Flags);
}
MachineInstrBuilder buildLShr(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_LSHR, {Dst}, {Src0, Src1}, Flags);
}
MachineInstrBuilder buildAShr(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_ASHR, {Dst}, {Src0, Src1}, Flags);
}
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildOr(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_OR, {Dst}, {Src0, Src1}, Flags);
}
/// Build and insert \p Res = G_FADD \p Op0, \p Op1
MachineInstrBuilder buildFAdd(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_FADD, {Dst}, {Src0, Src1}, Flags);
}
/// Build and insert \p Res = G_STRICT_FADD \p Op0, \p Op1
- MachineInstrBuilder buildStrictFAdd(const DstOp &Dst, const SrcOp &Src0,
- const SrcOp &Src1,
- Optional<unsigned> Flags = std::nullopt) {
+ MachineInstrBuilder
+ buildStrictFAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1,
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_STRICT_FADD, {Dst}, {Src0, Src1}, Flags);
}
/// Build and insert \p Res = G_FSUB \p Op0, \p Op1
MachineInstrBuilder buildFSub(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_FSUB, {Dst}, {Src0, Src1}, Flags);
}
/// Build and insert \p Res = G_FDIV \p Op0, \p Op1
MachineInstrBuilder buildFDiv(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_FDIV, {Dst}, {Src0, Src1}, Flags);
}
/// Build and insert \p Res = G_FMA \p Op0, \p Op1, \p Op2
MachineInstrBuilder buildFMA(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1, const SrcOp &Src2,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_FMA, {Dst}, {Src0, Src1, Src2}, Flags);
}
/// Build and insert \p Res = G_FMAD \p Op0, \p Op1, \p Op2
MachineInstrBuilder buildFMAD(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1, const SrcOp &Src2,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_FMAD, {Dst}, {Src0, Src1, Src2}, Flags);
}
/// Build and insert \p Res = G_FNEG \p Op0
MachineInstrBuilder buildFNeg(const DstOp &Dst, const SrcOp &Src0,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_FNEG, {Dst}, {Src0}, Flags);
}
/// Build and insert \p Res = G_FABS \p Op0
MachineInstrBuilder buildFAbs(const DstOp &Dst, const SrcOp &Src0,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_FABS, {Dst}, {Src0}, Flags);
}
/// Build and insert \p Dst = G_FCANONICALIZE \p Src0
MachineInstrBuilder
buildFCanonicalize(const DstOp &Dst, const SrcOp &Src0,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_FCANONICALIZE, {Dst}, {Src0}, Flags);
}
/// Build and insert \p Dst = G_INTRINSIC_TRUNC \p Src0
MachineInstrBuilder
buildIntrinsicTrunc(const DstOp &Dst, const SrcOp &Src0,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_INTRINSIC_TRUNC, {Dst}, {Src0}, Flags);
}
/// Build and insert \p Res = GFFLOOR \p Op0, \p Op1
- MachineInstrBuilder buildFFloor(const DstOp &Dst, const SrcOp &Src0,
- Optional<unsigned> Flags = std::nullopt) {
+ MachineInstrBuilder
+ buildFFloor(const DstOp &Dst, const SrcOp &Src0,
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_FFLOOR, {Dst}, {Src0}, Flags);
}
/// Build and insert \p Dst = G_FLOG \p Src
MachineInstrBuilder buildFLog(const DstOp &Dst, const SrcOp &Src,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_FLOG, {Dst}, {Src}, Flags);
}
/// Build and insert \p Dst = G_FLOG2 \p Src
MachineInstrBuilder buildFLog2(const DstOp &Dst, const SrcOp &Src,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_FLOG2, {Dst}, {Src}, Flags);
}
/// Build and insert \p Dst = G_FEXP2 \p Src
MachineInstrBuilder buildFExp2(const DstOp &Dst, const SrcOp &Src,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_FEXP2, {Dst}, {Src}, Flags);
}
/// Build and insert \p Dst = G_FPOW \p Src0, \p Src1
MachineInstrBuilder buildFPow(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1,
- Optional<unsigned> Flags = std::nullopt) {
+ std::optional<unsigned> Flags = std::nullopt) {
return buildInstr(TargetOpcode::G_FPOW, {Dst}, {Src0, Src1}, Flags);
}
virtual MachineInstrBuilder
buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps, ArrayRef<SrcOp> SrcOps,
- Optional<unsigned> Flags = std::nullopt);
+ std::optional<unsigned> Flags = std::nullopt);
};
} // End namespace llvm.
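The defaulted Flags parameter behaves as before: callers may omit it or pass MachineInstr::MIFlag bits. A minimal sketch (the helper name is hypothetical):
  // Hypothetical helper: emit a reassociable G_FADD, or a plain one by default.
  MachineInstrBuilder buildMaybeFastFAdd(MachineIRBuilder &B, const DstOp &Dst,
                                         const SrcOp &S0, const SrcOp &S1,
                                         bool Fast) {
    if (Fast)
      return B.buildFAdd(Dst, S0, S1, MachineInstr::FmReassoc);
    return B.buildFAdd(Dst, S0, S1); // Flags defaults to std::nullopt
  }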
MachineOptimizationRemarkMissed &R);
/// If \p VReg is defined by a G_CONSTANT, return the corresponding value.
-Optional<APInt> getIConstantVRegVal(Register VReg,
- const MachineRegisterInfo &MRI);
+std::optional<APInt> getIConstantVRegVal(Register VReg,
+ const MachineRegisterInfo &MRI);
/// If \p VReg is defined by a G_CONSTANT that fits in int64_t, returns it.
-Optional<int64_t> getIConstantVRegSExtVal(Register VReg,
- const MachineRegisterInfo &MRI);
+std::optional<int64_t> getIConstantVRegSExtVal(Register VReg,
+ const MachineRegisterInfo &MRI);
/// Simple struct used to hold a constant integer value and a virtual
/// register.
/// If \p VReg is defined by a statically evaluable chain of instructions rooted
/// on a G_CONSTANT returns its APInt value and def register.
-Optional<ValueAndVReg>
+std::optional<ValueAndVReg>
getIConstantVRegValWithLookThrough(Register VReg,
const MachineRegisterInfo &MRI,
bool LookThroughInstrs = true);
/// If \p VReg is defined by a statically evaluable chain of instructions rooted
/// on a G_CONSTANT or G_FCONSTANT returns its value as APInt and def register.
-Optional<ValueAndVReg> getAnyConstantVRegValWithLookThrough(
+std::optional<ValueAndVReg> getAnyConstantVRegValWithLookThrough(
Register VReg, const MachineRegisterInfo &MRI,
bool LookThroughInstrs = true, bool LookThroughAnyExt = false);
/// If \p VReg is defined by a statically evaluable chain of instructions rooted
/// on a G_FCONSTANT returns its APFloat value and def register.
-Optional<FPValueAndVReg>
+std::optional<FPValueAndVReg>
getFConstantVRegValWithLookThrough(Register VReg,
const MachineRegisterInfo &MRI,
bool LookThroughInstrs = true);
/// away any copies.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
-Optional<DefinitionAndSourceRegister>
+std::optional<DefinitionAndSourceRegister>
getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);
/// Find the def instruction for \p Reg, folding away any trivial copies. May
/// fallback.
void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU);
-Optional<APInt> ConstantFoldBinOp(unsigned Opcode, const Register Op1,
- const Register Op2,
- const MachineRegisterInfo &MRI);
-Optional<APFloat> ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
- const Register Op2,
- const MachineRegisterInfo &MRI);
+std::optional<APInt> ConstantFoldBinOp(unsigned Opcode, const Register Op1,
+ const Register Op2,
+ const MachineRegisterInfo &MRI);
+std::optional<APFloat> ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
+ const Register Op2,
+ const MachineRegisterInfo &MRI);
/// Tries to constant fold a vector binop with sources \p Op1 and \p Op2.
/// Returns an empty vector on failure.
const Register Op2,
const MachineRegisterInfo &MRI);
-Optional<APInt> ConstantFoldExtOp(unsigned Opcode, const Register Op1,
- uint64_t Imm, const MachineRegisterInfo &MRI);
+std::optional<APInt> ConstantFoldExtOp(unsigned Opcode, const Register Op1,
+ uint64_t Imm,
+ const MachineRegisterInfo &MRI);
-Optional<APFloat> ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
- Register Src,
- const MachineRegisterInfo &MRI);
+std::optional<APFloat> ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
+ Register Src,
+ const MachineRegisterInfo &MRI);
/// Tries to constant fold a G_CTLZ operation on \p Src. If \p Src is a vector
/// then it tries to do an element-wise constant fold.
-Optional<SmallVector<unsigned>>
+std::optional<SmallVector<unsigned>>
ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI);
/// Test if the given value is known to have exactly one bit set. This differs
/// \returns The splat index of a G_SHUFFLE_VECTOR \p MI when \p MI is a splat.
/// If \p MI is not a splat, returns std::nullopt.
-Optional<int> getSplatIndex(MachineInstr &MI);
+std::optional<int> getSplatIndex(MachineInstr &MI);
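Caller-side, the new signature reads as in this sketch (useSplatIndex is a hypothetical consumer; MI must be a G_SHUFFLE_VECTOR, per the assertion in the implementation further down):
  if (std::optional<int> Idx = getSplatIndex(MI))
    useSplatIndex(*Idx); // hypothetical consumer; *Idx is the common mask index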
/// \returns the scalar integral splat value of \p Reg if possible.
-Optional<APInt> getIConstantSplatVal(const Register Reg,
- const MachineRegisterInfo &MRI);
+std::optional<APInt> getIConstantSplatVal(const Register Reg,
+ const MachineRegisterInfo &MRI);
/// \returns the scalar integral splat value defined by \p MI if possible.
-Optional<APInt> getIConstantSplatVal(const MachineInstr &MI,
- const MachineRegisterInfo &MRI);
+std::optional<APInt> getIConstantSplatVal(const MachineInstr &MI,
+ const MachineRegisterInfo &MRI);
/// \returns the scalar sign extended integral splat value of \p Reg if
/// possible.
-Optional<int64_t> getIConstantSplatSExtVal(const Register Reg,
- const MachineRegisterInfo &MRI);
+std::optional<int64_t> getIConstantSplatSExtVal(const Register Reg,
+ const MachineRegisterInfo &MRI);
/// \returns the scalar sign extended integral splat value defined by \p MI if
/// possible.
-Optional<int64_t> getIConstantSplatSExtVal(const MachineInstr &MI,
- const MachineRegisterInfo &MRI);
+std::optional<int64_t> getIConstantSplatSExtVal(const MachineInstr &MI,
+ const MachineRegisterInfo &MRI);
/// Returns a floating point scalar constant of a build vector splat if it
/// exists. When \p AllowUndef == true some elements can be undef but not all.
-Optional<FPValueAndVReg> getFConstantSplat(Register VReg,
- const MachineRegisterInfo &MRI,
- bool AllowUndef = true);
+std::optional<FPValueAndVReg> getFConstantSplat(Register VReg,
+ const MachineRegisterInfo &MRI,
+ bool AllowUndef = true);
/// Return true if the specified register is defined by G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are \p SplatValue or undef.
/// \endcode
///
/// In the above case, this will return a RegOrConstant containing 4.
-Optional<RegOrConstant> getVectorSplat(const MachineInstr &MI,
- const MachineRegisterInfo &MRI);
+std::optional<RegOrConstant> getVectorSplat(const MachineInstr &MI,
+ const MachineRegisterInfo &MRI);
/// Determines if \p MI defines a constant integer or a build vector of
/// constant integers. Treats undef values as constants.
/// Determines if \p MI defines a constant integer or a splat vector of
/// constant integers.
/// \returns the scalar constant or std::nullopt.
-Optional<APInt> isConstantOrConstantSplatVector(MachineInstr &MI,
- const MachineRegisterInfo &MRI);
+std::optional<APInt>
+isConstantOrConstantSplatVector(MachineInstr &MI,
+ const MachineRegisterInfo &MRI);
/// Attempt to match a unary predicate against a scalar/splat constant or every
/// element of a constant G_BUILD_VECTOR. If \p ConstVal is null, the source
bool isVPReduction(unsigned Opcode);
/// The operand position of the vector mask.
-Optional<unsigned> getVPMaskIdx(unsigned Opcode);
+std::optional<unsigned> getVPMaskIdx(unsigned Opcode);
/// The operand position of the explicit vector length parameter.
-Optional<unsigned> getVPExplicitVectorLengthIdx(unsigned Opcode);
+std::optional<unsigned> getVPExplicitVectorLengthIdx(unsigned Opcode);
//===--------------------------------------------------------------------===//
/// MemIndexedMode enum - This enum defines the load / store indexed
#ifndef LLVM_CODEGEN_MIRFORMATTER_H
#define LLVM_CODEGEN_MIRFORMATTER_H
-#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>
+#include <optional>
namespace llvm {
/// that we can have a more meaningful mnemonic than a 64-bit integer. Passing
/// None to OpIdx means the index is unknown.
virtual void printImm(raw_ostream &OS, const MachineInstr &MI,
- Optional<unsigned> OpIdx, int64_t Imm) const {
+ std::optional<unsigned> OpIdx, int64_t Imm) const {
OS << Imm;
}
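Targets overriding this hook only see the parameter type change; a minimal sketch of an override (the class and the spelling it prints are hypothetical):
  struct MyTargetMIRFormatter : public MIRFormatter {
    void printImm(raw_ostream &OS, const MachineInstr &MI,
                  std::optional<unsigned> OpIdx, int64_t Imm) const override {
      if (OpIdx) // operand index known
        OS << "imm" << *OpIdx << ':' << Imm; // hypothetical spelling
      else
        OS << Imm;
    }
  };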
/// Fixed unique ID assigned to this basic block upon creation. Used with
/// basic block sections and basic block labels.
- Optional<unsigned> BBID;
+ std::optional<unsigned> BBID;
/// With basic block sections, this stores the Section ID of the basic block.
MBBSectionID SectionID{0};
void setIsEndSection(bool V = true) { IsEndSection = V; }
- Optional<unsigned> getBBID() const { return BBID; }
+ std::optional<unsigned> getBBID() const { return BBID; }
/// Returns the BBID of the block when BBAddrMapVersion >= 2, otherwise
/// returns `MachineBasicBlock::Number`.
bool allDefsAreDead() const;
/// Return a valid size if the instruction is a spill instruction.
- Optional<unsigned> getSpillSize(const TargetInstrInfo *TII) const;
+ std::optional<unsigned> getSpillSize(const TargetInstrInfo *TII) const;
/// Return a valid size if the instruction is a folded spill instruction.
- Optional<unsigned> getFoldedSpillSize(const TargetInstrInfo *TII) const;
+ std::optional<unsigned> getFoldedSpillSize(const TargetInstrInfo *TII) const;
/// Return a valid size if the instruction is a restore instruction.
- Optional<unsigned> getRestoreSize(const TargetInstrInfo *TII) const;
+ std::optional<unsigned> getRestoreSize(const TargetInstrInfo *TII) const;
/// Return a valid size if the instruction is a folded restore instruction.
- Optional<unsigned>
+ std::optional<unsigned>
getFoldedRestoreSize(const TargetInstrInfo *TII) const;
/// Copy implicit register operands from specified
/// information from it's parent.
/// \param IntrinsicInfo - same as \p TRI.
void print(raw_ostream &os, ModuleSlotTracker &MST, LLT TypeToPrint,
- Optional<unsigned> OpIdx, bool PrintDef, bool IsStandalone,
+ std::optional<unsigned> OpIdx, bool PrintDef, bool IsStandalone,
bool ShouldPrintRegisterTies, unsigned TiedOperandIdx,
const TargetRegisterInfo *TRI,
const TargetIntrinsicInfo *IntrinsicInfo) const;
private:
SDValue Base;
SDValue Index;
- Optional<int64_t> Offset;
+ std::optional<int64_t> Offset;
bool IsIndexSignExt = false;
public:
// Returns true `Op0` and `Op1` can be proven to alias/not alias, in
// which case `IsAlias` is set to true/false.
static bool computeAliasing(const SDNode *Op0,
- const Optional<int64_t> NumBytes0,
+ const std::optional<int64_t> NumBytes0,
const SDNode *Op1,
- const Optional<int64_t> NumBytes1,
+ const std::optional<int64_t> NumBytes1,
const SelectionDAG &DAG, bool &IsAlias);
/// Parses tree in N for base, index, offset addresses.
/// If this BuildVector is constant and represents the numerical series
/// "<a, a+n, a+2n, a+3n, ...>" where a is integer and n is a non-zero integer,
/// the value "<a,n>" is returned.
- Optional<std::pair<APInt, APInt>> isConstantSequence() const;
+ std::optional<std::pair<APInt, APInt>> isConstantSequence() const;
/// Recast bit data \p SrcBitElements to \p DstEltSizeInBits wide elements.
/// Undef elements are treated as zero, and entirely undefined elements are
>;
// Fold fp_op(cst) to the constant result of the floating point operation.
-def constant_fp_op_matchinfo: GIDefMatchData<"Optional<APFloat>">;
+def constant_fp_op_matchinfo: GIDefMatchData<"std::optional<APFloat>">;
def constant_fp_op: GICombineRule <
(defs root:$root, constant_fp_op_matchinfo:$info),
(match (wip_match_opcode G_FNEG, G_FABS, G_FPTRUNC, G_FSQRT, G_FLOG2):$root,
// We assume a single instruction only has a spill or reload, not
// both.
- Optional<unsigned> Size;
+ std::optional<unsigned> Size;
if ((Size = MI.getRestoreSize(TII))) {
CommentOS << *Size << "-byte Reload\n";
} else if ((Size = MI.getFoldedRestoreSize(TII))) {
assert(DVInst->isDebugValue() && "Invalid History entry");
// FIXME: Find a way to represent constant variables, since they are
// relatively common.
- Optional<DbgVariableLocation> Location =
+ std::optional<DbgVariableLocation> Location =
DbgVariableLocation::extractFromMachineInstruction(*DVInst);
if (!Location)
{
/// variable's lexical scope instruction ranges.
static cl::opt<bool> TrimVarLocs("trim-var-locs", cl::Hidden, cl::init(true));
-Optional<DbgVariableLocation>
+std::optional<DbgVariableLocation>
DbgVariableLocation::extractFromMachineInstruction(
const MachineInstr &Instruction) {
DbgVariableLocation Location;
DbgVariable &V;
const MachineInstr &MI;
size_t ListIndex;
- Optional<uint8_t> TagOffset;
+ std::optional<uint8_t> TagOffset;
public:
ListBuilder(DebugLocStream &Locs, DwarfCompileUnit &CU, AsmPrinter &Asm,
AbsDef = &ContextCU->createAndAddDIE(dwarf::DW_TAG_subprogram, *ContextDIE, nullptr);
ContextCU->applySubprogramAttributesToDefinition(SP, *AbsDef);
ContextCU->addSInt(*AbsDef, dwarf::DW_AT_inline,
- DD->getDwarfVersion() <= 4 ? Optional<dwarf::Form>()
+ DD->getDwarfVersion() <= 4 ? std::optional<dwarf::Form>()
: dwarf::DW_FORM_implicit_const,
dwarf::DW_INL_inlined);
if (DIE *ObjectPointer = ContextCU->createAndAddScopeChildren(Scope, *AbsDef))
/// Index of the entry list in DebugLocs.
unsigned DebugLocListIndex = ~0u;
/// DW_OP_LLVM_tag_offset value from DebugLocs.
- Optional<uint8_t> DebugLocListTagOffset;
+ std::optional<uint8_t> DebugLocListTagOffset;
/// Single value location description.
std::unique_ptr<DbgValueLoc> ValueLoc = nullptr;
void setDebugLocListIndex(unsigned O) { DebugLocListIndex = O; }
unsigned getDebugLocListIndex() const { return DebugLocListIndex; }
void setDebugLocListTagOffset(uint8_t O) { DebugLocListTagOffset = O; }
- Optional<uint8_t> getDebugLocListTagOffset() const { return DebugLocListTagOffset; }
+ std::optional<uint8_t> getDebugLocListTagOffset() const {
+ return DebugLocListTagOffset;
+ }
StringRef getName() const { return getVariable()->getName(); }
const DbgValueLoc *getValueLoc() const { return ValueLoc.get(); }
/// Get the FI entries, sorted by fragment offset.
// and not any other parts of the following DWARF expression.
assert(!IsEmittingEntryValue && "Can't emit entry value around expression");
- Optional<DIExpression::ExprOperand> PrevConvertOp;
+ std::optional<DIExpression::ExprOperand> PrevConvertOp;
while (ExprCursor) {
auto Op = ExprCursor.take();
DIExpressionCursor(const DIExpressionCursor &) = default;
/// Consume one operation.
- Optional<DIExpression::ExprOperand> take() {
+ std::optional<DIExpression::ExprOperand> take() {
if (Start == End)
return std::nullopt;
return *(Start++);
void consume(unsigned N) { std::advance(Start, N); }
/// Return the current operation.
- Optional<DIExpression::ExprOperand> peek() const {
+ std::optional<DIExpression::ExprOperand> peek() const {
if (Start == End)
return std::nullopt;
return *(Start);
}
/// Return the next operation.
- Optional<DIExpression::ExprOperand> peekNext() const {
+ std::optional<DIExpression::ExprOperand> peekNext() const {
if (Start == End)
return std::nullopt;
bool isParameterValue() { return LocationFlags & CallSiteParamValue; }
- Optional<uint8_t> TagOffset;
+ std::optional<uint8_t> TagOffset;
protected:
/// Push a DW_OP_piece / DW_OP_bit_piece for emitting later, if one is needed
}
void DwarfUnit::addUInt(DIEValueList &Die, dwarf::Attribute Attribute,
- Optional<dwarf::Form> Form, uint64_t Integer) {
+ std::optional<dwarf::Form> Form, uint64_t Integer) {
if (!Form)
Form = DIEInteger::BestForm(false, Integer);
assert(Form != dwarf::DW_FORM_implicit_const &&
}
void DwarfUnit::addSInt(DIEValueList &Die, dwarf::Attribute Attribute,
- Optional<dwarf::Form> Form, int64_t Integer) {
+ std::optional<dwarf::Form> Form, int64_t Integer) {
if (!Form)
Form = DIEInteger::BestForm(true, Integer);
addAttribute(Die, Attribute, *Form, DIEInteger(Integer));
}
-void DwarfUnit::addSInt(DIELoc &Die, Optional<dwarf::Form> Form,
+void DwarfUnit::addSInt(DIELoc &Die, std::optional<dwarf::Form> Form,
int64_t Integer) {
addSInt(Die, (dwarf::Attribute)0, Form, Integer);
}
#include "DwarfDebug.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/DIE.h"
#include "llvm/Target/TargetMachine.h"
+#include <optional>
#include <string>
namespace llvm {
/// Add an unsigned integer attribute data and value.
void addUInt(DIEValueList &Die, dwarf::Attribute Attribute,
- Optional<dwarf::Form> Form, uint64_t Integer);
+ std::optional<dwarf::Form> Form, uint64_t Integer);
void addUInt(DIEValueList &Block, dwarf::Form Form, uint64_t Integer);
/// Add a signed integer attribute data and value.
void addSInt(DIEValueList &Die, dwarf::Attribute Attribute,
- Optional<dwarf::Form> Form, int64_t Integer);
+ std::optional<dwarf::Form> Form, int64_t Integer);
- void addSInt(DIELoc &Die, Optional<dwarf::Form> Form, int64_t Integer);
+ void addSInt(DIELoc &Die, std::optional<dwarf::Form> Form, int64_t Integer);
/// Add a string attribute data and value.
///
/// Extract the offset used in \p DIExpr. Returns std::nullopt if the expression
/// doesn't explicitly describe a memory location with DW_OP_deref or if the
/// expression is too complex to interpret.
-static Optional<int64_t> getDerefOffsetInBytes(const DIExpression *DIExpr) {
+static std::optional<int64_t>
+getDerefOffsetInBytes(const DIExpression *DIExpr) {
int64_t Offset = 0;
const unsigned NumElements = DIExpr->getNumElements();
const auto Elements = DIExpr->getElements();
//===----------------------------------------------------------------------===//
#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/Passes.h"
#define INVALID_OFFSET INT_MAX
/// contains the location where CSR register is saved.
struct CSRSavedLocation {
- CSRSavedLocation(Optional<unsigned> R, Optional<int> O)
+ CSRSavedLocation(std::optional<unsigned> R, std::optional<int> O)
: Reg(R), Offset(O) {}
- Optional<unsigned> Reg;
- Optional<int> Offset;
+ std::optional<unsigned> Reg;
+ std::optional<int> Offset;
};
/// Contains cfa offset and register values valid at entry and exit of basic
// Determine cfa offset and register set by the block.
for (MachineInstr &MI : *MBBInfo.MBB) {
if (MI.isCFIInstruction()) {
- Optional<unsigned> CSRReg;
- Optional<int> CSROffset;
+ std::optional<unsigned> CSRReg;
+ std::optional<int> CSROffset;
unsigned CFIIndex = MI.getOperand(0).getCFIIndex();
const MCCFIInstruction &CFI = Instrs[CFIIndex];
switch (CFI.getOperation()) {
void CSEMIRBuilder::profileEverything(unsigned Opc, ArrayRef<DstOp> DstOps,
ArrayRef<SrcOp> SrcOps,
- Optional<unsigned> Flags,
+ std::optional<unsigned> Flags,
GISelInstProfileBuilder &B) const {
profileMBBOpcode(B, Opc);
MachineInstrBuilder CSEMIRBuilder::buildInstr(unsigned Opc,
ArrayRef<DstOp> DstOps,
ArrayRef<SrcOp> SrcOps,
- Optional<unsigned> Flag) {
+ std::optional<unsigned> Flag) {
switch (Opc) {
default:
break;
break;
}
- if (Optional<APInt> Cst = ConstantFoldBinOp(Opc, SrcOps[0].getReg(),
- SrcOps[1].getReg(), *getMRI()))
+ if (std::optional<APInt> Cst = ConstantFoldBinOp(
+ Opc, SrcOps[0].getReg(), SrcOps[1].getReg(), *getMRI()))
return buildConstant(DstOps[0], *Cst);
break;
}
// Try to constant fold these.
assert(SrcOps.size() == 2 && "Invalid sources");
assert(DstOps.size() == 1 && "Invalid dsts");
- if (Optional<APFloat> Cst = ConstantFoldFPBinOp(
+ if (std::optional<APFloat> Cst = ConstantFoldFPBinOp(
Opc, SrcOps[0].getReg(), SrcOps[1].getReg(), *getMRI()))
return buildFConstant(DstOps[0], *Cst);
break;
// Try to constant fold these.
assert(SrcOps.size() == 1 && "Invalid sources");
assert(DstOps.size() == 1 && "Invalid dsts");
- if (Optional<APFloat> Cst = ConstantFoldIntToFloat(
+ if (std::optional<APFloat> Cst = ConstantFoldIntToFloat(
Opc, DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getReg(), *getMRI()))
return buildFConstant(DstOps[0], *Cst);
break;
/// 1 1 2
/// 2 2 1
/// 3 3 0
-static Optional<bool>
+static std::optional<bool>
isBigEndian(const SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
int64_t LowestIdx) {
// Need at least two byte positions to decide on endianness.
LegalizerHelper::LegalizeResult::Legalized;
}
-static Optional<APFloat> constantFoldFpUnary(unsigned Opcode, LLT DstTy,
- const Register Op,
- const MachineRegisterInfo &MRI) {
+static std::optional<APFloat>
+constantFoldFpUnary(unsigned Opcode, LLT DstTy, const Register Op,
+ const MachineRegisterInfo &MRI) {
const ConstantFP *MaybeCst = getConstantFPVRegVal(Op, MRI);
if (!MaybeCst)
return std::nullopt;
return V;
}
-bool CombinerHelper::matchCombineConstantFoldFpUnary(MachineInstr &MI,
- Optional<APFloat> &Cst) {
+bool CombinerHelper::matchCombineConstantFoldFpUnary(
+ MachineInstr &MI, std::optional<APFloat> &Cst) {
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
LLT DstTy = MRI.getType(DstReg);
return Cst.has_value();
}
-void CombinerHelper::applyCombineConstantFoldFpUnary(MachineInstr &MI,
- Optional<APFloat> &Cst) {
+void CombinerHelper::applyCombineConstantFoldFpUnary(
+ MachineInstr &MI, std::optional<APFloat> &Cst) {
assert(Cst && "Optional is unexpectedly empty!");
Builder.setInstrAndDebugLoc(MI);
MachineFunction &MF = Builder.getMF();
return true;
}
-Optional<SmallVector<Register, 8>>
+std::optional<SmallVector<Register, 8>>
CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const {
assert(Root->getOpcode() == TargetOpcode::G_OR && "Expected G_OR only!");
// We want to detect if Root is part of a tree which represents a bunch
return std::make_pair(Load, Shift / MemSizeInBits);
}
-Optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
+std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
CombinerHelper::findLoadOffsetsForLoadOrCombine(
SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
const SmallVector<Register, 8> &RegsToVisit, const unsigned MemSizeInBits) {
// pattern. If it does, then we can represent it using a load + possibly a
// BSWAP.
bool IsBigEndianTarget = MF.getDataLayout().isBigEndian();
- Optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx);
+ std::optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx);
if (!IsBigEndian)
return false;
bool NeedsBSwap = IsBigEndianTarget != *IsBigEndian;
// G_PTR_ADD (G_PTR_ADD X, C), Y) -> (G_PTR_ADD (G_PTR_ADD(X, Y), C)
// if and only if (G_PTR_ADD X, C) has one use.
Register LHSBase;
- Optional<ValueAndVReg> LHSCstOff;
+ std::optional<ValueAndVReg> LHSCstOff;
if (!mi_match(MI.getBaseReg(), MRI,
m_OneNonDBGUse(m_GPtrAdd(m_Reg(LHSBase), m_GCst(LHSCstOff)))))
return false;
return MRI.getType(MatchInfo) == DstVecTy;
}
- Optional<ValueAndVReg> ShiftAmount;
+ std::optional<ValueAndVReg> ShiftAmount;
const auto LoPattern = m_GBitcast(m_Reg(Lo));
const auto HiPattern = m_GLShr(m_GBitcast(m_Reg(Hi)), m_GCst(ShiftAmount));
if (mi_match(
Register &MatchInfo) {
// Replace (G_TRUNC (G_LSHR (G_BITCAST (G_BUILD_VECTOR x, y)), K)) with
// y if K == size of vector element type
- Optional<ValueAndVReg> ShiftAmt;
+ std::optional<ValueAndVReg> ShiftAmt;
if (!mi_match(MI.getOperand(1).getReg(), MRI,
m_GLShr(m_GBitcast(m_GBuildVector(m_Reg(), m_Reg(MatchInfo))),
m_GCst(ShiftAmt))))
return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1});
}
-Optional<MachineInstrBuilder>
+std::optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
const LLT ValueTy, uint64_t Value) {
assert(Res == 0 && "Res is a result argument");
return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}
-MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res,
- const SrcOp &Op,
- Optional<unsigned> Flags) {
+MachineInstrBuilder
+MachineIRBuilder::buildFPTrunc(const DstOp &Res, const SrcOp &Op,
+ std::optional<unsigned> Flags) {
return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}
const DstOp &Res,
const SrcOp &Op0,
const SrcOp &Op1,
- Optional<unsigned> Flags) {
+ std::optional<unsigned> Flags) {
return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}
-MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res,
- const SrcOp &Tst,
- const SrcOp &Op0,
- const SrcOp &Op1,
- Optional<unsigned> Flags) {
+MachineInstrBuilder
+MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
+ const SrcOp &Op0, const SrcOp &Op1,
+ std::optional<unsigned> Flags) {
return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}
#endif
}
-MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
- ArrayRef<DstOp> DstOps,
- ArrayRef<SrcOp> SrcOps,
- Optional<unsigned> Flags) {
+MachineInstrBuilder
+MachineIRBuilder::buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
+ ArrayRef<SrcOp> SrcOps,
+ std::optional<unsigned> Flags) {
switch (Opc) {
default:
break;
reportGISelFailure(MF, TPC, MORE, R);
}
-Optional<APInt> llvm::getIConstantVRegVal(Register VReg,
- const MachineRegisterInfo &MRI) {
- Optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
+std::optional<APInt> llvm::getIConstantVRegVal(Register VReg,
+ const MachineRegisterInfo &MRI) {
+ std::optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
VReg, MRI, /*LookThroughInstrs*/ false);
assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
"Value found while looking through instrs");
return ValAndVReg->Value;
}
-Optional<int64_t>
+std::optional<int64_t>
llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) {
- Optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
+ std::optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
if (Val && Val->getBitWidth() <= 64)
return Val->getSExtValue();
return std::nullopt;
namespace {
typedef std::function<bool(const MachineInstr *)> IsOpcodeFn;
-typedef std::function<Optional<APInt>(const MachineInstr *MI)> GetAPCstFn;
+typedef std::function<std::optional<APInt>(const MachineInstr *MI)> GetAPCstFn;
-Optional<ValueAndVReg> getConstantVRegValWithLookThrough(
+std::optional<ValueAndVReg> getConstantVRegValWithLookThrough(
Register VReg, const MachineRegisterInfo &MRI, IsOpcodeFn IsConstantOpcode,
GetAPCstFn getAPCstValue, bool LookThroughInstrs = true,
bool LookThroughAnyExt = false) {
if (!MI || !IsConstantOpcode(MI))
return std::nullopt;
- Optional<APInt> MaybeVal = getAPCstValue(MI);
+ std::optional<APInt> MaybeVal = getAPCstValue(MI);
if (!MaybeVal)
return std::nullopt;
APInt &Val = *MaybeVal;
return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
}
-Optional<APInt> getCImmAsAPInt(const MachineInstr *MI) {
+std::optional<APInt> getCImmAsAPInt(const MachineInstr *MI) {
const MachineOperand &CstVal = MI->getOperand(1);
if (CstVal.isCImm())
return CstVal.getCImm()->getValue();
return std::nullopt;
}
-Optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) {
+std::optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) {
const MachineOperand &CstVal = MI->getOperand(1);
if (CstVal.isCImm())
return CstVal.getCImm()->getValue();
} // end anonymous namespace
-Optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough(
+std::optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough(
Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
return getConstantVRegValWithLookThrough(VReg, MRI, isIConstant,
getCImmAsAPInt, LookThroughInstrs);
}
-Optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough(
+std::optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough(
Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
bool LookThroughAnyExt) {
return getConstantVRegValWithLookThrough(
LookThroughAnyExt);
}
-Optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
+std::optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
auto Reg = getConstantVRegValWithLookThrough(
VReg, MRI, isFConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs);
return MI->getOperand(1).getFPImm();
}
-Optional<DefinitionAndSourceRegister>
+std::optional<DefinitionAndSourceRegister>
llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
Register DefSrcReg = Reg;
auto *DefMI = MRI.getVRegDef(Reg);
MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
const MachineRegisterInfo &MRI) {
- Optional<DefinitionAndSourceRegister> DefSrcReg =
+ std::optional<DefinitionAndSourceRegister> DefSrcReg =
getDefSrcRegIgnoringCopies(Reg, MRI);
return DefSrcReg ? DefSrcReg->MI : nullptr;
}
Register llvm::getSrcRegIgnoringCopies(Register Reg,
const MachineRegisterInfo &MRI) {
- Optional<DefinitionAndSourceRegister> DefSrcReg =
+ std::optional<DefinitionAndSourceRegister> DefSrcReg =
getDefSrcRegIgnoringCopies(Reg, MRI);
return DefSrcReg ? DefSrcReg->Reg : Register();
}
return APF;
}
-Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
- const Register Op2,
- const MachineRegisterInfo &MRI) {
+std::optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode,
+ const Register Op1,
+ const Register Op2,
+ const MachineRegisterInfo &MRI) {
auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
if (!MaybeOp2Cst)
return std::nullopt;
return std::nullopt;
}
-Optional<APFloat> llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
- const Register Op2,
- const MachineRegisterInfo &MRI) {
+std::optional<APFloat>
+llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
+ const Register Op2, const MachineRegisterInfo &MRI) {
const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
if (!Op2Cst)
return std::nullopt;
return LiveIn;
}
-Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
- uint64_t Imm,
- const MachineRegisterInfo &MRI) {
+std::optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode,
+ const Register Op1, uint64_t Imm,
+ const MachineRegisterInfo &MRI) {
auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
if (MaybeOp1Cst) {
switch (Opcode) {
return std::nullopt;
}
-Optional<APFloat> llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
- Register Src,
- const MachineRegisterInfo &MRI) {
+std::optional<APFloat>
+llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src,
+ const MachineRegisterInfo &MRI) {
assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
APFloat DstVal(getFltSemanticForLLT(DstTy));
return std::nullopt;
}
-Optional<SmallVector<unsigned>>
+std::optional<SmallVector<unsigned>>
llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) {
LLT Ty = MRI.getType(Src);
SmallVector<unsigned> FoldedCTLZs;
bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
GISelKnownBits *KB) {
- Optional<DefinitionAndSourceRegister> DefSrcReg =
+ std::optional<DefinitionAndSourceRegister> DefSrcReg =
getDefSrcRegIgnoringCopies(Reg, MRI);
if (!DefSrcReg)
return false;
return LLT::scalar(GCD);
}
-Optional<int> llvm::getSplatIndex(MachineInstr &MI) {
+std::optional<int> llvm::getSplatIndex(MachineInstr &MI) {
assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
"Only G_SHUFFLE_VECTOR can have a splat index!");
ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
namespace {
-Optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
- const MachineRegisterInfo &MRI,
- bool AllowUndef) {
+std::optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
+ const MachineRegisterInfo &MRI,
+ bool AllowUndef) {
MachineInstr *MI = getDefIgnoringCopies(VReg, MRI);
if (!MI)
return std::nullopt;
if (!isBuildVectorOp(MI->getOpcode()))
return std::nullopt;
- Optional<ValueAndVReg> SplatValAndReg;
+ std::optional<ValueAndVReg> SplatValAndReg;
for (MachineOperand &Op : MI->uses()) {
Register Element = Op.getReg();
auto ElementValAndReg =
AllowUndef);
}
-Optional<APInt> llvm::getIConstantSplatVal(const Register Reg,
- const MachineRegisterInfo &MRI) {
+std::optional<APInt>
+llvm::getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI) {
if (auto SplatValAndReg =
getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false)) {
- Optional<ValueAndVReg> ValAndVReg =
+ std::optional<ValueAndVReg> ValAndVReg =
getIConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
return ValAndVReg->Value;
}
return std::nullopt;
}
-Optional<APInt> llvm::getIConstantSplatVal(const MachineInstr &MI,
- const MachineRegisterInfo &MRI) {
+std::optional<APInt>
+llvm::getIConstantSplatVal(const MachineInstr &MI,
+ const MachineRegisterInfo &MRI) {
return getIConstantSplatVal(MI.getOperand(0).getReg(), MRI);
}
-Optional<int64_t>
+std::optional<int64_t>
llvm::getIConstantSplatSExtVal(const Register Reg,
const MachineRegisterInfo &MRI) {
if (auto SplatValAndReg =
return std::nullopt;
}
-Optional<int64_t>
+std::optional<int64_t>
llvm::getIConstantSplatSExtVal(const MachineInstr &MI,
const MachineRegisterInfo &MRI) {
return getIConstantSplatSExtVal(MI.getOperand(0).getReg(), MRI);
}
-Optional<FPValueAndVReg> llvm::getFConstantSplat(Register VReg,
- const MachineRegisterInfo &MRI,
- bool AllowUndef) {
+std::optional<FPValueAndVReg>
+llvm::getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI,
+ bool AllowUndef) {
if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
return std::nullopt;
return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
}
-Optional<RegOrConstant> llvm::getVectorSplat(const MachineInstr &MI,
- const MachineRegisterInfo &MRI) {
+std::optional<RegOrConstant>
+llvm::getVectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI) {
unsigned Opc = MI.getOpcode();
if (!isBuildVectorOp(Opc))
return std::nullopt;
return true;
}
-Optional<APInt>
+std::optional<APInt>
llvm::isConstantOrConstantSplatVector(MachineInstr &MI,
const MachineRegisterInfo &MRI) {
Register Def = MI.getOperand(0).getReg();
/// If non-None, then an instruction in \p Insts that also must be
/// hoisted.
- Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence;
+ std::optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence;
/*implicit*/ DependenceResult(
bool CanReorder,
- Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence)
+ std::optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence)
: CanReorder(CanReorder), PotentialDependence(PotentialDependence) {
assert((!PotentialDependence || CanReorder) &&
"!CanReorder && PotentialDependence.hasValue() not allowed!");
assert(llvm::all_of(Block, canHandle) && "Check this first!");
assert(!is_contained(Block, MI) && "Block must be exclusive of MI!");
- Optional<ArrayRef<MachineInstr *>::iterator> Dep;
+ std::optional<ArrayRef<MachineInstr *>::iterator> Dep;
for (auto I = Block.begin(), E = Block.end(); I != E; ++I) {
if (canReorder(*I, MI))
Masks.push_back(std::make_pair(MO, InstID));
}
-Optional<SpillLocationNo> MLocTracker::getOrTrackSpillLoc(SpillLoc L) {
+std::optional<SpillLocationNo> MLocTracker::getOrTrackSpillLoc(SpillLoc L) {
SpillLocationNo SpillID(SpillLocs.idFor(L));
if (SpillID.id() == 0) {
// void InstrRefBasedLDV::printVarLocInMBB(..)
#endif
-Optional<SpillLocationNo>
+std::optional<SpillLocationNo>
InstrRefBasedLDV::extractSpillBaseRegAndOffset(const MachineInstr &MI) {
assert(MI.hasOneMemOperand() &&
"Spill instruction does not have exactly one memory operand?");
return MTracker->getOrTrackSpillLoc({Reg, Offset});
}
-Optional<LocIdx>
+std::optional<LocIdx>
InstrRefBasedLDV::findLocationForMemOperand(const MachineInstr &MI) {
- Optional<SpillLocationNo> SpillLoc = extractSpillBaseRegAndOffset(MI);
+ std::optional<SpillLocationNo> SpillLoc = extractSpillBaseRegAndOffset(MI);
if (!SpillLoc)
return std::nullopt;
// Default machine value number is <None> -- if no instruction defines
// the corresponding value, it must have been optimized out.
- Optional<ValueIDNum> NewID;
+ std::optional<ValueIDNum> NewID;
// Try to lookup the instruction number, and find the machine value number
// that it defines. It could be an instruction, or a PHI.
// a register def was folded into a stack store.
if (OpNo == MachineFunction::DebugOperandMemNumber &&
TargetInstr.hasOneMemOperand()) {
- Optional<LocIdx> L = findLocationForMemOperand(TargetInstr);
+ std::optional<LocIdx> L = findLocationForMemOperand(TargetInstr);
if (L)
NewID = ValueIDNum(BlockNo, InstrIt->second.second, *L);
} else if (OpNo != MachineFunction::DebugOperandMemNumber) {
Register Base;
StackOffset Offs = TFI->getFrameIndexReference(*MI.getMF(), FI, Base);
SpillLoc SL = {Base, Offs};
- Optional<SpillLocationNo> SpillNo = MTracker->getOrTrackSpillLoc(SL);
+ std::optional<SpillLocationNo> SpillNo = MTracker->getOrTrackSpillLoc(SL);
// We might be able to find a value, but have chosen not to, to avoid
// tracking too much stack information.
// If this instruction writes to a spill slot, def that slot.
if (hasFoldedStackStore(MI)) {
- if (Optional<SpillLocationNo> SpillNo = extractSpillBaseRegAndOffset(MI)) {
+ if (std::optional<SpillLocationNo> SpillNo =
+ extractSpillBaseRegAndOffset(MI)) {
for (unsigned int I = 0; I < MTracker->NumSlotIdxes; ++I) {
unsigned SpillID = MTracker->getSpillIDWithIdx(*SpillNo, I);
LocIdx L = MTracker->getSpillMLoc(SpillID);
// Tell TTracker about any folded stack store.
if (hasFoldedStackStore(MI)) {
- if (Optional<SpillLocationNo> SpillNo = extractSpillBaseRegAndOffset(MI)) {
+ if (std::optional<SpillLocationNo> SpillNo =
+ extractSpillBaseRegAndOffset(MI)) {
for (unsigned int I = 0; I < MTracker->NumSlotIdxes; ++I) {
unsigned SpillID = MTracker->getSpillIDWithIdx(*SpillNo, I);
LocIdx L = MTracker->getSpillMLoc(SpillID);
}
}
-Optional<SpillLocationNo>
+std::optional<SpillLocationNo>
InstrRefBasedLDV::isSpillInstruction(const MachineInstr &MI,
MachineFunction *MF) {
// TODO: Handle multiple stores folded into one.
return Reg != 0;
}
-Optional<SpillLocationNo>
+std::optional<SpillLocationNo>
InstrRefBasedLDV::isRestoreInstruction(const MachineInstr &MI,
MachineFunction *MF, unsigned &Reg) {
if (!MI.hasOneMemOperand())
// First, if there are any DBG_VALUEs pointing at a spill slot that is
// written to, terminate that variable location. The value in memory
// will have changed. DbgEntityHistoryCalculator doesn't try to detect this.
- if (Optional<SpillLocationNo> Loc = isSpillInstruction(MI, MF)) {
+ if (std::optional<SpillLocationNo> Loc = isSpillInstruction(MI, MF)) {
// Un-set this location and clobber, so that earlier locations don't
// continue past this store.
for (unsigned SlotIdx = 0; SlotIdx < MTracker->NumSlotIdxes; ++SlotIdx) {
unsigned SpillID = MTracker->getLocID(*Loc, {Size, 0});
DoTransfer(Reg, SpillID);
} else {
- Optional<SpillLocationNo> Loc = isRestoreInstruction(MI, MF, Reg);
+ std::optional<SpillLocationNo> Loc = isRestoreInstruction(MI, MF, Reg);
if (!Loc)
return false;
continue;
}
- Optional<ValueIDNum> JoinedOpLoc =
+ std::optional<ValueIDNum> JoinedOpLoc =
pickOperandPHILoc(Idx, MBB, LiveOuts, MOutLocs, BlockOrders);
if (!JoinedOpLoc)
return true;
}
-Optional<ValueIDNum> InstrRefBasedLDV::pickOperandPHILoc(
+std::optional<ValueIDNum> InstrRefBasedLDV::pickOperandPHILoc(
unsigned DbgOpIdx, const MachineBasicBlock &MBB, const LiveIdxT &LiveOuts,
FuncValueTable &MOutLocs,
const SmallVectorImpl<const MachineBasicBlock *> &BlockOrders) {
} // end namespace llvm
-Optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIs(
+std::optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIs(
MachineFunction &MF, const ValueTable *MLiveOuts,
const ValueTable *MLiveIns, MachineInstr &Here, uint64_t InstrNum) {
assert(MLiveOuts && MLiveIns &&
if (SeenDbgPHIIt != SeenDbgPHIs.end())
return SeenDbgPHIIt->second;
- Optional<ValueIDNum> Result =
+ std::optional<ValueIDNum> Result =
resolveDbgPHIsImpl(MF, MLiveOuts, MLiveIns, Here, InstrNum);
SeenDbgPHIs.insert({&Here, Result});
return Result;
}
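As an aside, the cache above deliberately stores misses as well: a std::nullopt result is inserted into SeenDbgPHIs, so the SSA search is not repeated for the same DBG_PHI. A minimal self-contained sketch of that memoization shape, with hypothetical names and std::unordered_map standing in for llvm::DenseMap (not part of this patch):

    #include <optional>
    #include <unordered_map>

    using Key = const void *;
    using Value = int; // stand-in for ValueIDNum

    // Expensive resolution that may legitimately fail.
    std::optional<Value> resolveImpl(Key K) {
      if (K == nullptr)
        return std::nullopt;
      return 42;
    }

    std::unordered_map<Key, std::optional<Value>> Seen;

    std::optional<Value> resolve(Key K) {
      auto It = Seen.find(K);
      if (It != Seen.end())
        return It->second;                 // hit: may be a cached failure
      std::optional<Value> Result = resolveImpl(K);
      Seen.insert({K, Result});            // cache success and failure alike
      return Result;
    }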
-Optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIsImpl(
+std::optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIsImpl(
MachineFunction &MF, const ValueTable *MLiveOuts,
const ValueTable *MLiveIns, MachineInstr &Here, uint64_t InstrNum) {
// Pick out records of DBG_PHI instructions that have been observed. If there
/// Find LocIdx for SpillLoc \p L, creating a new one if it's not tracked.
/// Returns std::nullopt in scenarios where a spill slot could be
/// tracked, but we would likely run into resource limitations.
- Optional<SpillLocationNo> getOrTrackSpillLoc(SpillLoc L);
+ std::optional<SpillLocationNo> getOrTrackSpillLoc(SpillLoc L);
// Get LocIdx of a spill ID.
LocIdx getSpillMLoc(unsigned SpillID) {
MachineBasicBlock *MBB;
/// The value number read by the DBG_PHI -- or std::nullopt if it didn't
/// refer to a value.
- Optional<ValueIDNum> ValueRead;
+ std::optional<ValueIDNum> ValueRead;
/// Register/Stack location the DBG_PHI reads -- or std::nullopt if it
/// referred to something unexpected.
- Optional<LocIdx> ReadLoc;
+ std::optional<LocIdx> ReadLoc;
operator unsigned() const { return InstrNum; }
};
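Both optional fields above default-construct to the empty state, so a freshly built record already means "nothing resolved yet" without a separate boolean flag. A reduced, hypothetical sketch of that shape (not the real DBG_PHI record):

    #include <cstdint>
    #include <optional>

    struct PHIRecord {
      uint64_t InstrNum = 0;
      std::optional<int> ValueRead; // empty until a value number is resolved
      std::optional<int> ReadLoc;   // empty if the read location was unexpected

      bool resolved() const { return ValueRead.has_value(); }
    };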
/// DBG_INSTR_REFs that call resolveDbgPHIs. These variable references solve
/// a mini SSA problem caused by DBG_PHIs being cloned, this collection caches
/// the result.
- DenseMap<MachineInstr *, Optional<ValueIDNum>> SeenDbgPHIs;
+ DenseMap<MachineInstr *, std::optional<ValueIDNum>> SeenDbgPHIs;
DbgOpIDMap DbgOpStore;
StringRef StackProbeSymbolName;
/// Tests whether this instruction is a spill to a stack slot.
- Optional<SpillLocationNo> isSpillInstruction(const MachineInstr &MI,
- MachineFunction *MF);
+ std::optional<SpillLocationNo> isSpillInstruction(const MachineInstr &MI,
+ MachineFunction *MF);
/// Decide if @MI is a spill instruction and return true if it is. We use 2
/// criteria to make this decision:
/// If a given instruction is identified as a spill, return the spill slot
/// and set \p Reg to the spilled register.
- Optional<SpillLocationNo> isRestoreInstruction(const MachineInstr &MI,
- MachineFunction *MF, unsigned &Reg);
+ std::optional<SpillLocationNo> isRestoreInstruction(const MachineInstr &MI,
+ MachineFunction *MF,
+ unsigned &Reg);
/// Given a spill instruction, extract the spill slot information, ensure it's
/// tracked, and return the spill number.
- Optional<SpillLocationNo>
+ std::optional<SpillLocationNo>
extractSpillBaseRegAndOffset(const MachineInstr &MI);
/// Observe a single instruction while stepping through a block.
/// \p Here the position of a DBG_INSTR_REF seeking a machine value number
/// \p InstrNum Debug instruction number defined by DBG_PHI instructions.
/// \returns The machine value number at position Here, or std::nullopt.
- Optional<ValueIDNum> resolveDbgPHIs(MachineFunction &MF,
- const ValueTable *MLiveOuts,
- const ValueTable *MLiveIns,
- MachineInstr &Here, uint64_t InstrNum);
-
- Optional<ValueIDNum> resolveDbgPHIsImpl(MachineFunction &MF,
- const ValueTable *MLiveOuts,
- const ValueTable *MLiveIns,
- MachineInstr &Here,
- uint64_t InstrNum);
+ std::optional<ValueIDNum> resolveDbgPHIs(MachineFunction &MF,
+ const ValueTable *MLiveOuts,
+ const ValueTable *MLiveIns,
+ MachineInstr &Here,
+ uint64_t InstrNum);
+
+ std::optional<ValueIDNum> resolveDbgPHIsImpl(MachineFunction &MF,
+ const ValueTable *MLiveOuts,
+ const ValueTable *MLiveIns,
+ MachineInstr &Here,
+ uint64_t InstrNum);
/// Step through the function, recording register definitions and movements
/// in an MLocTracker. Convert the observations into a per-block transfer
const LiveIdxT &LiveOuts, FuncValueTable &MOutLocs,
const SmallVectorImpl<const MachineBasicBlock *> &BlockOrders);
- Optional<ValueIDNum> pickOperandPHILoc(
+ std::optional<ValueIDNum> pickOperandPHILoc(
unsigned DbgOpIdx, const MachineBasicBlock &MBB, const LiveIdxT &LiveOuts,
FuncValueTable &MOutLocs,
const SmallVectorImpl<const MachineBasicBlock *> &BlockOrders);
&& !MemOperand->getPseudoValue()->isAliased(MFI);
}
- Optional<LocIdx> findLocationForMemOperand(const MachineInstr &MI);
+ std::optional<LocIdx> findLocationForMemOperand(const MachineInstr &MI);
};
} // namespace LiveDebugValues
/// Insert a set of ranges.
void insertFromLocSet(const VarLocSet &ToLoad, const VarLocMap &Map);
- llvm::Optional<LocIndices> getEntryValueBackup(DebugVariable Var);
+ std::optional<LocIndices> getEntryValueBackup(DebugVariable Var);
/// Empty the set.
void clear() {
/// If a given instruction is identified as a spill, return the spill location
/// and set \p Reg to the spilled register.
- Optional<VarLoc::SpillLoc> isRestoreInstruction(const MachineInstr &MI,
- MachineFunction *MF,
- Register &Reg);
+ std::optional<VarLoc::SpillLoc> isRestoreInstruction(const MachineInstr &MI,
+ MachineFunction *MF,
+ Register &Reg);
/// Given a spill instruction, extract the register and offset used to
/// address the spill location in a target independent way.
VarLoc::SpillLoc extractSpillBaseRegAndOffset(const MachineInstr &MI);
/// Return the Loc ID of an entry value backup location, if it exists for the
/// variable.
-llvm::Optional<LocIndices>
+std::optional<LocIndices>
VarLocBasedLDV::OpenRangesSet::getEntryValueBackup(DebugVariable Var) {
auto It = EntryValuesBackupVars.find(Var);
if (It != EntryValuesBackupVars.end())
continue;
auto DebugVar = VL.Var;
- Optional<LocIndices> EntryValBackupIDs =
+ std::optional<LocIndices> EntryValBackupIDs =
OpenRanges.getEntryValueBackup(DebugVar);
// If the parameter has the entry value backup, it means we should
return false;
}
-Optional<VarLocBasedLDV::VarLoc::SpillLoc>
+std::optional<VarLocBasedLDV::VarLoc::SpillLoc>
VarLocBasedLDV::isRestoreInstruction(const MachineInstr &MI,
- MachineFunction *MF, Register &Reg) {
+ MachineFunction *MF, Register &Reg) {
if (!MI.hasOneMemOperand())
return std::nullopt;
MachineFunction *MF = MI.getMF();
TransferKind TKind;
Register Reg;
- Optional<VarLoc::SpillLoc> Loc;
+ std::optional<VarLoc::SpillLoc> Loc;
LLVM_DEBUG(dbgs() << "Examining instruction: "; MI.dump(););
/// VNInfo.
/// \param [out] Kills Append end points of VNI's live range to Kills.
/// \param LIS Live intervals analysis.
- void extendDef(SlotIndex Idx, DbgVariableValue DbgValue,
- SmallDenseMap<unsigned, std::pair<LiveRange *, const VNInfo *>>
- &LiveIntervalInfo,
- Optional<std::pair<SlotIndex, SmallVector<unsigned>>> &Kills,
- LiveIntervals &LIS);
+ void
+ extendDef(SlotIndex Idx, DbgVariableValue DbgValue,
+ SmallDenseMap<unsigned, std::pair<LiveRange *, const VNInfo *>>
+ &LiveIntervalInfo,
+ std::optional<std::pair<SlotIndex, SmallVector<unsigned>>> &Kills,
+ LiveIntervals &LIS);
/// The value in LI may be copies to other registers. Determine if
/// any of the copies are available at the kill points, and add defs if
SlotIndex Idx, DbgVariableValue DbgValue,
SmallDenseMap<unsigned, std::pair<LiveRange *, const VNInfo *>>
&LiveIntervalInfo,
- Optional<std::pair<SlotIndex, SmallVector<unsigned>>> &Kills,
+ std::optional<std::pair<SlotIndex, SmallVector<unsigned>>> &Kills,
LiveIntervals &LIS) {
SlotIndex Start = Idx;
MachineBasicBlock *MBB = LIS.getMBBFromIndex(Start);
LIs[LocNo] = {LI, VNI};
}
if (ShouldExtendDef) {
- Optional<std::pair<SlotIndex, SmallVector<unsigned>>> Kills;
+ std::optional<std::pair<SlotIndex, SmallVector<unsigned>>> Kills;
extendDef(Idx, DbgValue, LIs, Kills, LIS);
if (Kills) {
MachineOperand Operand;
StringRef::iterator Begin;
StringRef::iterator End;
- Optional<unsigned> TiedDefIdx;
+ std::optional<unsigned> TiedDefIdx;
ParsedMachineOperand(const MachineOperand &Operand, StringRef::iterator Begin,
- StringRef::iterator End, Optional<unsigned> &TiedDefIdx)
+ StringRef::iterator End,
+ std::optional<unsigned> &TiedDefIdx)
: Operand(Operand), Begin(Begin), End(End), TiedDefIdx(TiedDefIdx) {
if (TiedDefIdx)
assert(Operand.isReg() && Operand.isUse() &&
bool parseSubRegisterIndex(unsigned &SubReg);
bool parseRegisterTiedDefIndex(unsigned &TiedDefIdx);
bool parseRegisterOperand(MachineOperand &Dest,
- Optional<unsigned> &TiedDefIdx, bool IsDef = false);
+ std::optional<unsigned> &TiedDefIdx,
+ bool IsDef = false);
bool parseImmediateOperand(MachineOperand &Dest);
bool parseIRConstant(StringRef::iterator Loc, StringRef StringValue,
const Constant *&C);
bool parseLiveoutRegisterMaskOperand(MachineOperand &Dest);
bool parseMachineOperand(const unsigned OpCode, const unsigned OpIdx,
MachineOperand &Dest,
- Optional<unsigned> &TiedDefIdx);
+ std::optional<unsigned> &TiedDefIdx);
bool parseMachineOperandAndTargetFlags(const unsigned OpCode,
const unsigned OpIdx,
MachineOperand &Dest,
- Optional<unsigned> &TiedDefIdx);
+ std::optional<unsigned> &TiedDefIdx);
bool parseOffset(int64_t &Offset);
bool parseIRBlockAddressTaken(BasicBlock *&BB);
bool parseAlignment(uint64_t &Alignment);
bool parseAddrspace(unsigned &Addrspace);
- bool parseSectionID(Optional<MBBSectionID> &SID);
- bool parseBBID(Optional<unsigned> &BBID);
+ bool parseSectionID(std::optional<MBBSectionID> &SID);
+ bool parseBBID(std::optional<unsigned> &BBID);
bool parseOperandsOffset(MachineOperand &Op);
bool parseIRValue(const Value *&V);
bool parseMemoryOperandFlag(MachineMemOperand::Flags &Flags);
}
// Parse Machine Basic Block Section ID.
-bool MIParser::parseSectionID(Optional<MBBSectionID> &SID) {
+bool MIParser::parseSectionID(std::optional<MBBSectionID> &SID) {
assert(Token.is(MIToken::kw_bbsections));
lex();
if (Token.is(MIToken::IntegerLiteral)) {
}
// Parse Machine Basic Block ID.
-bool MIParser::parseBBID(Optional<unsigned> &BBID) {
+bool MIParser::parseBBID(std::optional<unsigned> &BBID) {
assert(Token.is(MIToken::kw_bb_id));
lex();
unsigned Value = 0;
bool IsLandingPad = false;
bool IsInlineAsmBrIndirectTarget = false;
bool IsEHFuncletEntry = false;
- Optional<MBBSectionID> SectionID;
+ std::optional<MBBSectionID> SectionID;
uint64_t Alignment = 0;
- Optional<unsigned> BBID;
+ std::optional<unsigned> BBID;
BasicBlock *BB = nullptr;
if (consumeIfPresent(MIToken::lparen)) {
do {
SmallVector<ParsedMachineOperand, 8> Operands;
while (Token.isRegister() || Token.isRegisterFlag()) {
auto Loc = Token.location();
- Optional<unsigned> TiedDefIdx;
+ std::optional<unsigned> TiedDefIdx;
if (parseRegisterOperand(MO, TiedDefIdx, /*IsDef=*/true))
return true;
Operands.push_back(
Token.isNot(MIToken::kw_debug_instr_number) &&
Token.isNot(MIToken::coloncolon) && Token.isNot(MIToken::lbrace)) {
auto Loc = Token.location();
- Optional<unsigned> TiedDefIdx;
+ std::optional<unsigned> TiedDefIdx;
if (parseMachineOperandAndTargetFlags(OpCode, Operands.size(), MO, TiedDefIdx))
return true;
Operands.push_back(
}
bool MIParser::parseRegisterOperand(MachineOperand &Dest,
- Optional<unsigned> &TiedDefIdx,
+ std::optional<unsigned> &TiedDefIdx,
bool IsDef) {
unsigned Flags = IsDef ? RegState::Define : 0;
while (Token.isRegisterFlag()) {
bool MIParser::parseMachineOperand(const unsigned OpCode, const unsigned OpIdx,
MachineOperand &Dest,
- Optional<unsigned> &TiedDefIdx) {
+ std::optional<unsigned> &TiedDefIdx) {
switch (Token.kind()) {
case MIToken::kw_implicit:
case MIToken::kw_implicit_define:
bool MIParser::parseMachineOperandAndTargetFlags(
const unsigned OpCode, const unsigned OpIdx, MachineOperand &Dest,
- Optional<unsigned> &TiedDefIdx) {
+ std::optional<unsigned> &TiedDefIdx) {
unsigned TF = 0;
bool HasTargetFlags = false;
if (Token.is(MIToken::kw_target_flags)) {
}
bool RegAllocScoring::runOnMachineFunction(MachineFunction &MF) {
- Optional<float> CachedReward;
+ std::optional<float> CachedReward;
auto GetReward = [&]() {
if (!CachedReward)
CachedReward = static_cast<float>(
return Size;
}
-Optional<unsigned>
+std::optional<unsigned>
MachineInstr::getSpillSize(const TargetInstrInfo *TII) const {
int FI;
if (TII->isStoreToStackSlotPostFE(*this, FI)) {
return std::nullopt;
}
-Optional<unsigned>
+std::optional<unsigned>
MachineInstr::getFoldedSpillSize(const TargetInstrInfo *TII) const {
MMOList Accesses;
if (TII->hasStoreToStackSlot(*this, Accesses))
return std::nullopt;
}
-Optional<unsigned>
+std::optional<unsigned>
MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const {
int FI;
if (TII->isLoadFromStackSlotPostFE(*this, FI)) {
return std::nullopt;
}
-Optional<unsigned>
+std::optional<unsigned>
MachineInstr::getFoldedRestoreSize(const TargetInstrInfo *TII) const {
MMOList Accesses;
if (TII->hasLoadFromStackSlot(*this, Accesses))
}
void MachineOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
- LLT TypeToPrint, Optional<unsigned> OpIdx, bool PrintDef,
- bool IsStandalone, bool ShouldPrintRegisterTies,
+ LLT TypeToPrint, std::optional<unsigned> OpIdx,
+ bool PrintDef, bool IsStandalone,
+ bool ShouldPrintRegisterTies,
unsigned TiedOperandIdx,
const TargetRegisterInfo *TRI,
const TargetIntrinsicInfo *IntrinsicInfo) const {
// Insert a phi that carries LoopReg from the loop body and InitReg otherwise.
// If InitReg is not given it is chosen arbitrarily. It will either be undef
// or will be chosen so as to share another phi.
- Register phi(Register LoopReg, Optional<Register> InitReg = {},
+ Register phi(Register LoopReg, std::optional<Register> InitReg = {},
const TargetRegisterClass *RC = nullptr);
// Create an undef register of the given register class.
Register undef(const TargetRegisterClass *RC);
// First, dive through the phi chain to find the defaults for the generated
// phis.
- SmallVector<Optional<Register>, 4> Defaults;
+ SmallVector<std::optional<Register>, 4> Defaults;
Register LoopReg = Reg;
auto LoopProducer = Producer;
while (LoopProducer->isPHI() && LoopProducer->getParent() == BB) {
}
int LoopProducerStage = S.getStage(LoopProducer);
- Optional<Register> IllegalPhiDefault;
+ std::optional<Register> IllegalPhiDefault;
if (LoopProducerStage == -1) {
// Do nothing.
// If we need more phis than we have defaults for, pad out with undefs for
// the earliest phis, which are at the end of the defaults chain (the
// chain is in reverse order).
- Defaults.resize(Defaults.size() + StageDiff, Defaults.empty()
- ? Optional<Register>()
- : Defaults.back());
+ Defaults.resize(Defaults.size() + StageDiff,
+ Defaults.empty() ? std::optional<Register>()
+ : Defaults.back());
}
}
return LoopReg;
}
-Register KernelRewriter::phi(Register LoopReg, Optional<Register> InitReg,
+Register KernelRewriter::phi(Register LoopReg, std::optional<Register> InitReg,
const TargetRegisterClass *RC) {
// If the init register is not undef, try and find an existing phi.
if (InitReg) {
//===----------------------------------------------------------------------===//
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
: MI(MI), CommutePair(std::make_pair(Idx1, Idx2)) {}
MachineInstr *getMI() const { return MI; }
- Optional<IndexPair> getCommutePair() const { return CommutePair; }
+ std::optional<IndexPair> getCommutePair() const { return CommutePair; }
private:
MachineInstr *MI;
- Optional<IndexPair> CommutePair;
+ std::optional<IndexPair> CommutePair;
};
/// Helper class to hold a reply for ValueTracker queries.
#define LLVM_CODEGEN_REGALLOCEVICTIONADVISOR_H
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/Register.h"
// Get the upper limit of elements in the given Order we need to analyze.
// TODO: this is a heuristic; we could consider learning it.
- Optional<unsigned> getOrderLimit(const LiveInterval &VirtReg,
- const AllocationOrder &Order,
- unsigned CostPerUseLimit) const;
+ std::optional<unsigned> getOrderLimit(const LiveInterval &VirtReg,
+ const AllocationOrder &Order,
+ unsigned CostPerUseLimit) const;
// Determine if it's worth trying to allocate this reg, given the
// CostPerUseLimit
return !Matrix->isPhysRegUsed(PhysReg);
}
-Optional<unsigned>
+std::optional<unsigned>
RegAllocEvictionAdvisor::getOrderLimit(const LiveInterval &VirtReg,
const AllocationOrder &Order,
unsigned CostPerUseLimit) const {
SmallPtrSet<const Instruction *, 2> getSIset(const SelectGroups &SIGroups);
// Returns the latency cost of a given instruction.
- Optional<uint64_t> computeInstCost(const Instruction *I);
+ std::optional<uint64_t> computeInstCost(const Instruction *I);
// Returns the misprediction cost of a given select when converted to branch.
Scaled64 getMispredictionCost(const SelectInst *SI, const Scaled64 CondCost);
return SIset;
}
-Optional<uint64_t> SelectOptimize::computeInstCost(const Instruction *I) {
+std::optional<uint64_t> SelectOptimize::computeInstCost(const Instruction *I) {
InstructionCost ICost =
TTI->getInstructionCost(I, TargetTransformInfo::TCK_Latency);
if (auto OC = ICost.getValue())
- return Optional<uint64_t>(*OC);
+ return std::optional<uint64_t>(*OC);
return std::nullopt;
}
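The conversion above keeps the "no valid cost" case as std::nullopt instead of a sentinel value. A minimal sketch of the same pattern under hypothetical names (this is not the TargetTransformInfo API):

    #include <cstdint>
    #include <optional>

    // In this toy model a negative raw latency means "unknown".
    std::optional<uint64_t> computeCost(int64_t RawLatency) {
      if (RawLatency < 0)
        return std::nullopt;
      return static_cast<uint64_t>(RawLatency);
    }

    uint64_t costOrDefault(int64_t RawLatency) {
      // value_or() collapses the missing case to a conservative default.
      return computeCost(RawLatency).value_or(1);
    }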
/// LOAD
///
/// *ExtractVectorElement
-static const Optional<ByteProvider>
+static const std::optional<ByteProvider>
calculateByteProvider(SDValue Op, unsigned Index, unsigned Depth,
std::optional<uint64_t> VectorIndex,
unsigned StartingIndex = 0) {
if (Index >= NarrowByteWidth)
return Op.getOpcode() == ISD::ZERO_EXTEND
- ? Optional<ByteProvider>(ByteProvider::getConstantZero())
+ ? std::optional<ByteProvider>(ByteProvider::getConstantZero())
: std::nullopt;
return calculateByteProvider(NarrowOp, Index, Depth + 1, VectorIndex,
StartingIndex);
// question
if (Index >= NarrowByteWidth)
return L->getExtensionType() == ISD::ZEXTLOAD
- ? Optional<ByteProvider>(ByteProvider::getConstantZero())
+ ? std::optional<ByteProvider>(ByteProvider::getConstantZero())
: std::nullopt;
unsigned BPVectorIndex = VectorIndex.value_or(0U);
// Check if the byte offsets we are looking at match with either big or
// little endian value loaded. Return true for big endian, false for little
// endian, and std::nullopt if the match failed.
-static Optional<bool> isBigEndian(const ArrayRef<int64_t> ByteOffsets,
- int64_t FirstOffset) {
+static std::optional<bool> isBigEndian(const ArrayRef<int64_t> ByteOffsets,
+ int64_t FirstOffset) {
// The endian can be decided only when it is 2 bytes at least.
unsigned Width = ByteOffsets.size();
if (Width < 2)
SDValue Chain;
SmallPtrSet<LoadSDNode *, 8> Loads;
- Optional<ByteProvider> FirstByteProvider;
+ std::optional<ByteProvider> FirstByteProvider;
int64_t FirstOffset = INT64_MAX;
// Check if all the bytes of the OR we are looking at are loaded from the same
// Check if the bytes of the OR we are looking at match with either big or
// little endian value load
- Optional<bool> IsBigEndian = isBigEndian(
+ std::optional<bool> IsBigEndian = isBigEndian(
makeArrayRef(ByteOffsets).drop_back(ZeroExtendedBytes), FirstOffset);
if (!IsBigEndian)
return SDValue();
bool IsAtomic;
SDValue BasePtr;
int64_t Offset;
- Optional<int64_t> NumBytes;
+ std::optional<int64_t> NumBytes;
MachineMemOperand *MMO;
};
: 0;
uint64_t Size =
MemoryLocation::getSizeOrUnknown(LSN->getMemoryVT().getStoreSize());
- return {LSN->isVolatile(), LSN->isAtomic(), LSN->getBasePtr(),
+ return {LSN->isVolatile(),
+ LSN->isAtomic(),
+ LSN->getBasePtr(),
Offset /*base offset*/,
- Optional<int64_t>(Size),
+ std::optional<int64_t>(Size),
LSN->getMemOperand()};
}
if (const auto *LN = cast<LifetimeSDNode>(N))
- return {false /*isVolatile*/, /*isAtomic*/ false, LN->getOperand(1),
+ return {false /*isVolatile*/,
+ /*isAtomic*/ false,
+ LN->getOperand(1),
(LN->hasOffset()) ? LN->getOffset() : 0,
- (LN->hasOffset()) ? Optional<int64_t>(LN->getSize())
- : Optional<int64_t>(),
+ (LN->hasOffset()) ? std::optional<int64_t>(LN->getSize())
+ : std::optional<int64_t>(),
(MachineMemOperand *)nullptr};
// Default.
- return {false /*isvolatile*/, /*isAtomic*/ false, SDValue(),
- (int64_t)0 /*offset*/,
- Optional<int64_t>() /*size*/, (MachineMemOperand *)nullptr};
+ return {false /*isvolatile*/,
+ /*isAtomic*/ false, SDValue(),
+ (int64_t)0 /*offset*/, std::optional<int64_t>() /*size*/,
+ (MachineMemOperand *)nullptr};
};
MemUseCharacteristics MUC0 = getCharacteristics(Op0),
// Align: If 0, don't allow use of a wider type
// WidenEx: If Align is not 0, the additional amount we can load/store from.
-static Optional<EVT> findMemType(SelectionDAG &DAG, const TargetLowering &TLI,
- unsigned Width, EVT WidenVT,
- unsigned Align = 0, unsigned WidenEx = 0) {
+static std::optional<EVT> findMemType(SelectionDAG &DAG,
+ const TargetLowering &TLI, unsigned Width,
+ EVT WidenVT, unsigned Align = 0,
+ unsigned WidenEx = 0) {
EVT WidenEltVT = WidenVT.getVectorElementType();
const bool Scalable = WidenVT.isScalableVector();
unsigned WidenWidth = WidenVT.getSizeInBits().getKnownMinSize();
(!LD->isSimple() || LdVT.isScalableVector()) ? 0 : LD->getAlign().value();
// Find the vector type that can load from.
- Optional<EVT> FirstVT =
+ std::optional<EVT> FirstVT =
findMemType(DAG, TLI, LdWidth.getKnownMinSize(), WidenVT, LdAlign,
WidthDiff.getKnownMinSize());
// Unless we're able to load in one instruction we must work out how to load
// the remainder.
if (!TypeSize::isKnownLE(LdWidth, FirstVTWidth)) {
- Optional<EVT> NewVT = FirstVT;
+ std::optional<EVT> NewVT = FirstVT;
TypeSize RemainingWidth = LdWidth;
TypeSize NewVTWidth = FirstVTWidth;
do {
while (StWidth.isNonZero()) {
// Find the largest vector type we can store with.
- Optional<EVT> NewVT =
+ std::optional<EVT> NewVT =
findMemType(DAG, TLI, StWidth.getKnownMinSize(), ValVT);
if (!NewVT)
return false;
}
/// The operand position of the vector mask.
-Optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) {
+std::optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) {
switch (Opcode) {
default:
return std::nullopt;
}
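Both VP helpers use the same idiom: a switch whose default answer is std::nullopt, meaning "this opcode has no such operand". The shape of that idiom with made-up opcodes:

    #include <optional>

    enum class Opc { Add, Load, Store }; // hypothetical opcodes

    // Operand position of the mask, or std::nullopt if Opc takes none.
    std::optional<unsigned> getMaskIdx(Opc O) {
      switch (O) {
      case Opc::Load:
        return 2;
      case Opc::Store:
        return 3;
      default:
        return std::nullopt;
      }
    }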
/// The operand position of the explicit vector length parameter.
-Optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) {
+std::optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) {
switch (Opcode) {
default:
return std::nullopt;
return V;
}
-static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1,
- const APInt &C2) {
+static std::optional<APInt> FoldValue(unsigned Opcode, const APInt &C1,
+ const APInt &C2) {
switch (Opcode) {
case ISD::ADD: return C1 + C2;
case ISD::SUB: return C1 - C2;
// Handle constant folding with UNDEF.
// TODO: Handle more cases.
-static llvm::Optional<APInt> FoldValueWithUndef(unsigned Opcode,
- const APInt &C1, bool IsUndef1,
- const APInt &C2,
- bool IsUndef2) {
+static std::optional<APInt> FoldValueWithUndef(unsigned Opcode, const APInt &C1,
+ bool IsUndef1, const APInt &C2,
+ bool IsUndef2) {
if (!(IsUndef1 || IsUndef2))
return FoldValue(Opcode, C1, C2);
if (C1->isOpaque() || C2->isOpaque())
return SDValue();
- Optional<APInt> FoldAttempt =
+ std::optional<APInt> FoldAttempt =
FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue());
if (!FoldAttempt)
return SDValue();
BV2->getConstantRawBits(IsLE, EltBits, RawBits2, UndefElts2)) {
SmallVector<APInt> RawBits;
for (unsigned I = 0, E = NumElts.getFixedValue(); I != E; ++I) {
- Optional<APInt> Fold = FoldValueWithUndef(
+ std::optional<APInt> Fold = FoldValueWithUndef(
Opcode, RawBits1[I], UndefElts1[I], RawBits2[I], UndefElts2[I]);
if (!Fold)
break;
return true;
}
-Optional<std::pair<APInt, APInt>>
+std::optional<std::pair<APInt, APInt>>
BuildVectorSDNode::isConstantSequence() const {
unsigned NumOps = getNumOperands();
if (NumOps < 2)
}
bool BaseIndexOffset::computeAliasing(const SDNode *Op0,
- const Optional<int64_t> NumBytes0,
+ const std::optional<int64_t> NumBytes0,
const SDNode *Op1,
- const Optional<int64_t> NumBytes1,
+ const std::optional<int64_t> NumBytes1,
const SelectionDAG &DAG, bool &IsAlias) {
BaseIndexOffset BasePtr0 = match(Op0, DAG);
/// Utility function for reservePreviousStackSlotForValue. Tries to find
/// stack slot index to which we have spilled value for previous statepoints.
/// LookUpDepth specifies maximum DFS depth this function is allowed to look.
-static Optional<int> findPreviousSpillSlot(const Value *Val,
- SelectionDAGBuilder &Builder,
- int LookUpDepth) {
+static std::optional<int> findPreviousSpillSlot(const Value *Val,
+ SelectionDAGBuilder &Builder,
+ int LookUpDepth) {
// Can not look any further - give up now
if (LookUpDepth <= 0)
return std::nullopt;
// All incoming values should have same known stack slot, otherwise result
// is unknown.
if (const PHINode *Phi = dyn_cast<PHINode>(Val)) {
- Optional<int> MergedResult;
+ std::optional<int> MergedResult;
for (const auto &IncomingValue : Phi->incoming_values()) {
- Optional<int> SpillSlot =
+ std::optional<int> SpillSlot =
findPreviousSpillSlot(IncomingValue, Builder, LookUpDepth - 1);
if (!SpillSlot)
return std::nullopt;
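findPreviousSpillSlot folds the per-incoming-value answers into a single std::optional<int>: any unknown slot, or two conflicting slots, collapses the merged result to std::nullopt. The same merge, reduced to a standalone sketch over a plain container (hypothetical helper, not the SelectionDAGBuilder code):

    #include <optional>
    #include <vector>

    // All slots must be known and identical; otherwise there is no answer.
    std::optional<int> mergeSlots(const std::vector<std::optional<int>> &Slots) {
      std::optional<int> Merged;
      for (const std::optional<int> &S : Slots) {
        if (!S)
          return std::nullopt;            // one unknown poisons the merge
        if (Merged && *Merged != *S)
          return std::nullopt;            // conflicting slots
        Merged = S;
      }
      return Merged;
    }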
return;
const int LookUpDepth = 6;
- Optional<int> Index =
+ std::optional<int> Index =
findPreviousSpillSlot(IncomingValue, Builder, LookUpDepth);
if (!Index)
return;
using namespace llvm;
-Optional<RegOrConstant>
+std::optional<RegOrConstant>
AArch64GISelUtils::getAArch64VectorSplat(const MachineInstr &MI,
const MachineRegisterInfo &MRI) {
if (auto Splat = getVectorSplat(MI, MRI))
return RegOrConstant(Src);
}
-Optional<int64_t>
+std::optional<int64_t>
AArch64GISelUtils::getAArch64VectorSplatScalar(const MachineInstr &MI,
const MachineRegisterInfo &MRI) {
auto Splat = getAArch64VectorSplat(MI, MRI);
/// \returns A value when \p MI is a vector splat of a Register or constant.
/// Checks for generic opcodes and AArch64-specific generic opcodes.
-Optional<RegOrConstant> getAArch64VectorSplat(const MachineInstr &MI,
- const MachineRegisterInfo &MRI);
+std::optional<RegOrConstant>
+getAArch64VectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI);
/// \returns A value when \p MI is a constant vector splat.
/// Checks for generic opcodes and AArch64-specific generic opcodes.
-Optional<int64_t> getAArch64VectorSplatScalar(const MachineInstr &MI,
- const MachineRegisterInfo &MRI);
+std::optional<int64_t>
+getAArch64VectorSplatScalar(const MachineInstr &MI,
+ const MachineRegisterInfo &MRI);
/// \returns true if \p MaybeSub and \p Pred are part of a CMN tree for an
/// integer compare.
/// The lane inserted into is defined by \p LaneIdx. The vector source
/// register is given by \p SrcReg. The register containing the element is
/// given by \p EltReg.
- MachineInstr *emitLaneInsert(Optional<Register> DstReg, Register SrcReg,
+ MachineInstr *emitLaneInsert(std::optional<Register> DstReg, Register SrcReg,
Register EltReg, unsigned LaneIdx,
const RegisterBank &RB,
MachineIRBuilder &MIRBuilder) const;
MachineIRBuilder &MIRBuilder) const;
// Emit a vector concat operation.
- MachineInstr *emitVectorConcat(Optional<Register> Dst, Register Op1,
+ MachineInstr *emitVectorConcat(std::optional<Register> Dst, Register Op1,
Register Op2,
MachineIRBuilder &MIRBuilder) const;
/// \p Pred if given is the intended predicate to use.
MachineInstr *
emitFPCompare(Register LHS, Register RHS, MachineIRBuilder &MIRBuilder,
- Optional<CmpInst::Predicate> = std::nullopt) const;
+ std::optional<CmpInst::Predicate> = std::nullopt) const;
MachineInstr *
emitInstr(unsigned Opcode, std::initializer_list<llvm::DstOp> DstOps,
MachineInstr *emitSelect(Register Dst, Register LHS, Register RHS,
AArch64CC::CondCode CC,
MachineIRBuilder &MIRBuilder) const;
- MachineInstr *emitExtractVectorElt(Optional<Register> DstReg,
+ MachineInstr *emitExtractVectorElt(std::optional<Register> DstReg,
const RegisterBank &DstRB, LLT ScalarTy,
Register VecReg, unsigned LaneIdx,
MachineIRBuilder &MIRBuilder) const;
return createTuple(Regs, RegClassIDs, SubRegs, MIB);
}
-static Optional<uint64_t> getImmedFromMO(const MachineOperand &Root) {
+static std::optional<uint64_t> getImmedFromMO(const MachineOperand &Root) {
auto &MI = *Root.getParent();
auto &MBB = *MI.getParent();
auto &MF = *MBB.getParent();
/// Returns the element immediate value of a vector shift operand if found.
/// This needs to detect a splat-like operation, e.g. a G_BUILD_VECTOR.
-static Optional<int64_t> getVectorShiftImm(Register Reg,
- MachineRegisterInfo &MRI) {
+static std::optional<int64_t> getVectorShiftImm(Register Reg,
+ MachineRegisterInfo &MRI) {
assert(MRI.getType(Reg).isVector() && "Expected a *vector* shift operand");
MachineInstr *OpMI = MRI.getVRegDef(Reg);
return getAArch64VectorSplatScalar(*OpMI, MRI);
/// Matches and returns the shift immediate value for a SHL instruction given
/// a shift operand.
-static Optional<int64_t> getVectorSHLImm(LLT SrcTy, Register Reg, MachineRegisterInfo &MRI) {
- Optional<int64_t> ShiftImm = getVectorShiftImm(Reg, MRI);
+static std::optional<int64_t> getVectorSHLImm(LLT SrcTy, Register Reg,
+ MachineRegisterInfo &MRI) {
+ std::optional<int64_t> ShiftImm = getVectorShiftImm(Reg, MRI);
if (!ShiftImm)
return std::nullopt;
// Check the immediate is in range for a SHL.
// Check if we have a vector of constants on RHS that we can select as the
// immediate form.
- Optional<int64_t> ImmVal = getVectorSHLImm(Ty, Src2Reg, MRI);
+ std::optional<int64_t> ImmVal = getVectorSHLImm(Ty, Src2Reg, MRI);
unsigned Opc = 0;
if (Ty == LLT::fixed_vector(2, 64)) {
case TargetOpcode::G_PTRMASK: {
Register MaskReg = I.getOperand(2).getReg();
- Optional<int64_t> MaskVal = getIConstantVRegSExtVal(MaskReg, MRI);
+ std::optional<int64_t> MaskVal = getIConstantVRegSExtVal(MaskReg, MRI);
// TODO: Implement arbitrary cases
if (!MaskVal || !isShiftedMask_64(*MaskVal))
return false;
}
MachineInstr *AArch64InstructionSelector::emitExtractVectorElt(
- Optional<Register> DstReg, const RegisterBank &DstRB, LLT ScalarTy,
+ std::optional<Register> DstReg, const RegisterBank &DstRB, LLT ScalarTy,
Register VecReg, unsigned LaneIdx, MachineIRBuilder &MIRBuilder) const {
MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
unsigned CopyOpc = 0;
return &*OrMI;
}
-MachineInstr *
-AArch64InstructionSelector::emitFPCompare(Register LHS, Register RHS,
- MachineIRBuilder &MIRBuilder,
- Optional<CmpInst::Predicate> Pred) const {
+MachineInstr *AArch64InstructionSelector::emitFPCompare(
+ Register LHS, Register RHS, MachineIRBuilder &MIRBuilder,
+ std::optional<CmpInst::Predicate> Pred) const {
MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
LLT Ty = MRI.getType(LHS);
if (Ty.isVector())
}
MachineInstr *AArch64InstructionSelector::emitVectorConcat(
- Optional<Register> Dst, Register Op1, Register Op2,
+ std::optional<Register> Dst, Register Op1, Register Op2,
MachineIRBuilder &MIRBuilder) const {
// We implement a vector concat by:
// 1. Use scalar_to_vector to insert the lower vector into the larger dest
LLT OpTy = MRI.getType(LHS);
assert(OpTy.getSizeInBits() == 32 || OpTy.getSizeInBits() == 64);
unsigned CCmpOpc;
- Optional<ValueAndVReg> C;
+ std::optional<ValueAndVReg> C;
if (CmpInst::isIntPredicate(CC)) {
C = getIConstantVRegValWithLookThrough(RHS, MRI);
if (C && C->Value.ult(32))
}
MachineInstr *AArch64InstructionSelector::emitLaneInsert(
- Optional<Register> DstReg, Register SrcReg, Register EltReg,
+ std::optional<Register> DstReg, Register SrcReg, Register EltReg,
unsigned LaneIdx, const RegisterBank &RB,
MachineIRBuilder &MIRBuilder) const {
MachineInstr *InsElt = nullptr;
if (Opc != TargetOpcode::G_AND)
return AArch64_AM::InvalidShiftExtend;
- Optional<uint64_t> MaybeAndMask = getImmedFromMO(MI.getOperand(2));
+ std::optional<uint64_t> MaybeAndMask = getImmedFromMO(MI.getOperand(2));
if (!MaybeAndMask)
return AArch64_AM::InvalidShiftExtend;
uint64_t AndMask = *MaybeAndMask;
if (RootDef->getOpcode() == TargetOpcode::G_SHL) {
// Look for a constant on the RHS of the shift.
MachineOperand &RHS = RootDef->getOperand(2);
- Optional<uint64_t> MaybeShiftVal = getImmedFromMO(RHS);
+ std::optional<uint64_t> MaybeShiftVal = getImmedFromMO(RHS);
if (!MaybeShiftVal)
return std::nullopt;
ShiftVal = *MaybeShiftVal;
const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
"Expected G_CONSTANT");
- Optional<int64_t> CstVal =
+ std::optional<int64_t> CstVal =
getIConstantVRegSExtVal(MI.getOperand(0).getReg(), MRI);
assert(CstVal && "Expected constant value");
MIB.addImm(*CstVal);
// Print comments that apply to both callable functions and entry points.
void AMDGPUAsmPrinter::emitCommonFunctionComments(
- uint32_t NumVGPR,
- Optional<uint32_t> NumAGPR,
- uint32_t TotalNumVGPR,
- uint32_t NumSGPR,
- uint64_t ScratchSize,
- uint64_t CodeSize,
- const AMDGPUMachineFunction *MFI) {
+ uint32_t NumVGPR, std::optional<uint32_t> NumAGPR, uint32_t TotalNumVGPR,
+ uint32_t NumSGPR, uint64_t ScratchSize, uint64_t CodeSize,
+ const AMDGPUMachineFunction *MFI) {
OutStreamer->emitRawComment(" codeLenInByte = " + Twine(CodeSize), false);
OutStreamer->emitRawComment(" NumSgprs: " + Twine(NumSGPR), false);
OutStreamer->emitRawComment(" NumVgprs: " + Twine(NumVGPR), false);
const AMDGPUResourceUsageAnalysis::SIFunctionResourceInfo &Info =
ResourceUsage->getResourceInfo(&MF.getFunction());
emitCommonFunctionComments(
- Info.NumVGPR,
- STM.hasMAIInsts() ? Info.NumAGPR : Optional<uint32_t>(),
- Info.getTotalNumVGPRs(STM),
- Info.getTotalNumSGPRs(MF.getSubtarget<GCNSubtarget>()),
- Info.PrivateSegmentSize,
- getFunctionCodeSize(MF), MFI);
+ Info.NumVGPR,
+ STM.hasMAIInsts() ? Info.NumAGPR : std::optional<uint32_t>(),
+ Info.getTotalNumVGPRs(STM),
+ Info.getTotalNumSGPRs(MF.getSubtarget<GCNSubtarget>()),
+ Info.PrivateSegmentSize, getFunctionCodeSize(MF), MFI);
return false;
}
OutStreamer->emitRawComment(" Kernel info:", false);
- emitCommonFunctionComments(CurrentProgramInfo.NumArchVGPR,
- STM.hasMAIInsts()
- ? CurrentProgramInfo.NumAccVGPR
- : Optional<uint32_t>(),
- CurrentProgramInfo.NumVGPR,
- CurrentProgramInfo.NumSGPR,
- CurrentProgramInfo.ScratchSize,
- getFunctionCodeSize(MF), MFI);
+ emitCommonFunctionComments(
+ CurrentProgramInfo.NumArchVGPR,
+ STM.hasMAIInsts() ? CurrentProgramInfo.NumAccVGPR
+ : std::optional<uint32_t>(),
+ CurrentProgramInfo.NumVGPR, CurrentProgramInfo.NumSGPR,
+ CurrentProgramInfo.ScratchSize, getFunctionCodeSize(MF), MFI);
OutStreamer->emitRawComment(
" FloatMode: " + Twine(CurrentProgramInfo.FloatMode), false);
const SIProgramInfo &KernelInfo);
void emitPALFunctionMetadata(const MachineFunction &MF);
void emitCommonFunctionComments(uint32_t NumVGPR,
- Optional<uint32_t> NumAGPR,
- uint32_t TotalNumVGPR,
- uint32_t NumSGPR,
- uint64_t ScratchSize,
- uint64_t CodeSize,
- const AMDGPUMachineFunction* MFI);
+ std::optional<uint32_t> NumAGPR,
+ uint32_t TotalNumVGPR, uint32_t NumSGPR,
+ uint64_t ScratchSize, uint64_t CodeSize,
+ const AMDGPUMachineFunction *MFI);
void emitResourceUsageRemarks(const MachineFunction &MF,
const SIProgramInfo &CurrentProgramInfo,
bool isModuleEntryFunction, bool hasMAIInsts);
} else if (InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR) {
LI->getImplicitArgPtr(InputReg, MRI, MIRBuilder);
} else if (InputID == AMDGPUFunctionArgInfo::LDS_KERNEL_ID) {
- Optional<uint32_t> Id =
+ std::optional<uint32_t> Id =
AMDGPUMachineFunction::getLDSKernelIdMetadata(MF.getFunction());
if (Id.has_value()) {
MIRBuilder.buildConstant(InputReg, Id.value());
// additional cost to negate them.
static bool isConstantCostlierToNegate(MachineInstr &MI, Register Reg,
MachineRegisterInfo &MRI) {
- Optional<FPValueAndVReg> FPValReg;
+ std::optional<FPValueAndVReg> FPValReg;
if (mi_match(Reg, MRI, m_GFCstOrSplat(FPValReg))) {
if (FPValReg->Value.isZero() && !FPValReg->Value.isNegative())
return true;
}
}
-Optional<StringRef>
+std::optional<StringRef>
MetadataStreamerMsgPackV3::getAccessQualifier(StringRef AccQual) const {
- return StringSwitch<Optional<StringRef>>(AccQual)
+ return StringSwitch<std::optional<StringRef>>(AccQual)
.Case("read_only", StringRef("read_only"))
.Case("write_only", StringRef("write_only"))
.Case("read_write", StringRef("read_write"))
.Default(std::nullopt);
}
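With std::optional<StringRef> as the StringSwitch result type, an unrecognized qualifier simply falls through to .Default(std::nullopt). The same lookup written without the LLVM helper, as a hedged illustration:

    #include <optional>
    #include <string_view>

    // Accept only the known access qualifiers; anything else has no answer.
    std::optional<std::string_view> accessQualifier(std::string_view AccQual) {
      if (AccQual == "read_only" || AccQual == "write_only" ||
          AccQual == "read_write")
        return AccQual;
      return std::nullopt;
    }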
-Optional<StringRef> MetadataStreamerMsgPackV3::getAddressSpaceQualifier(
+std::optional<StringRef> MetadataStreamerMsgPackV3::getAddressSpaceQualifier(
unsigned AddressSpace) const {
switch (AddressSpace) {
case AMDGPUAS::PRIVATE_ADDRESS:
void verify(StringRef HSAMetadataString) const;
- Optional<StringRef> getAccessQualifier(StringRef AccQual) const;
+ std::optional<StringRef> getAccessQualifier(StringRef AccQual) const;
- Optional<StringRef> getAddressSpaceQualifier(unsigned AddressSpace) const;
+ std::optional<StringRef>
+ getAddressSpaceQualifier(unsigned AddressSpace) const;
StringRef getValueKind(Type *Ty, StringRef TypeQual,
StringRef BaseTypeName) const;
SchedGroupMask SGMask;
// Maximum number of SUnits that can be added to this group.
- Optional<unsigned> MaxSize;
+ std::optional<unsigned> MaxSize;
// SchedGroups will only synchronize with other SchedGroups that have the same
// SyncID.
SchedGroupMask getMask() { return SGMask; }
- SchedGroup(SchedGroupMask SGMask, Optional<unsigned> MaxSize,
+ SchedGroup(SchedGroupMask SGMask, std::optional<unsigned> MaxSize,
ScheduleDAGInstrs *DAG, const SIInstrInfo *TII)
: SGMask(SGMask), MaxSize(MaxSize), DAG(DAG), TII(TII) {
SGID = NumSchedGroups++;
}
- SchedGroup(SchedGroupMask SGMask, Optional<unsigned> MaxSize, int SyncID,
+ SchedGroup(SchedGroupMask SGMask, std::optional<unsigned> MaxSize, int SyncID,
ScheduleDAGInstrs *DAG, const SIInstrInfo *TII)
: SGMask(SGMask), MaxSize(MaxSize), SyncID(SyncID), DAG(DAG), TII(TII) {
SGID = NumSchedGroups++;
// GFX9 and GFX10 have signed byte immediate offsets. The immediate
// offset for S_BUFFER instructions is unsigned.
int64_t ByteOffset = IsBuffer ? C->getZExtValue() : C->getSExtValue();
- Optional<int64_t> EncodedOffset =
+ std::optional<int64_t> EncodedOffset =
AMDGPU::getSMRDEncodedOffset(*Subtarget, ByteOffset, IsBuffer);
if (EncodedOffset && Offset && !Imm32Only) {
*Offset = CurDAG->getTargetConstant(*EncodedOffset, SL, MVT::i32);
const TargetRegisterClass *SrcRC
= TRI.getConstrainedRegClassForOperand(Src, *MRI);
- Optional<ValueAndVReg> ConstVal =
+ std::optional<ValueAndVReg> ConstVal =
getIConstantVRegValWithLookThrough(SrcReg, *MRI, true);
if (ConstVal) {
unsigned MovOpc =
auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);
- Optional<ValueAndVReg> ConstSelect =
+ std::optional<ValueAndVReg> ConstSelect =
getIConstantVRegValWithLookThrough(LaneSelect, *MRI);
if (ConstSelect) {
// The selector has to be an inline immediate, so we can use whatever for
MIB.addImm(ConstSelect->Value.getSExtValue() &
maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
} else {
- Optional<ValueAndVReg> ConstVal =
+ std::optional<ValueAndVReg> ConstVal =
getIConstantVRegValWithLookThrough(Val, *MRI);
// If the value written is an inline immediate, we can get away without a
if (Size != STI.getWavefrontSize())
return false;
- Optional<ValueAndVReg> Arg =
+ std::optional<ValueAndVReg> Arg =
getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI);
if (Arg) {
}
Register VOffset = MI.getOperand(4 + OpOffset).getReg();
- Optional<ValueAndVReg> MaybeVOffset =
+ std::optional<ValueAndVReg> MaybeVOffset =
getIConstantVRegValWithLookThrough(VOffset, *MRI);
const bool HasVOffset = !MaybeVOffset || MaybeVOffset->Value.getZExtValue();
return false;
const GEPInfo &GEPI = AddrInfo[0];
- Optional<int64_t> EncodedImm =
+ std::optional<int64_t> EncodedImm =
AMDGPU::getSMRDEncodedOffset(STI, GEPI.Imm, false);
if (SOffset && Offset) {
const GEPInfo &GEPInfo = AddrInfo[0];
Register PtrReg = GEPInfo.SgprParts[0];
- Optional<int64_t> EncodedImm =
+ std::optional<int64_t> EncodedImm =
AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
if (!EncodedImm)
return std::nullopt;
unsigned ShAmtBits) const {
assert(MI.getOpcode() == TargetOpcode::G_AND);
- Optional<APInt> RHS = getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI);
+ std::optional<APInt> RHS =
+ getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI);
if (!RHS)
return false;
return {Root, 0};
MachineOperand &RHS = RootI->getOperand(2);
- Optional<ValueAndVReg> MaybeOffset =
+ std::optional<ValueAndVReg> MaybeOffset =
getIConstantVRegValWithLookThrough(RHS.getReg(), MRI);
if (!MaybeOffset)
return {Root, 0};
}
/// Get an immediate that must be 32-bits, and treated as zero extended.
-static Optional<uint64_t> getConstantZext32Val(Register Reg,
- const MachineRegisterInfo &MRI) {
+static std::optional<uint64_t>
+getConstantZext32Val(Register Reg, const MachineRegisterInfo &MRI) {
// getIConstantVRegVal sexts any values, so see if that matters.
- Optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI);
+ std::optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI);
if (!OffsetVal || !isInt<32>(*OffsetVal))
return std::nullopt;
return Lo_32(*OffsetVal);
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
- Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
+ std::optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
if (!OffsetVal)
return {};
- Optional<int64_t> EncodedImm =
+ std::optional<int64_t> EncodedImm =
AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
if (!EncodedImm)
return {};
AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
- Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
+ std::optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
if (!OffsetVal)
return {};
- Optional<int64_t> EncodedImm
- = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
+ std::optional<int64_t> EncodedImm =
+ AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
if (!EncodedImm)
return {};
if (!SOffset)
return std::nullopt;
- Optional<int64_t> EncodedOffset =
+ std::optional<int64_t> EncodedOffset =
AMDGPU::getSMRDEncodedOffset(STI, Offset, /* IsBuffer */ true);
if (!EncodedOffset)
return std::nullopt;
// FIXME: Artifact combiner probably should have replaced the truncated
// constant before this, so we shouldn't need
// getIConstantVRegValWithLookThrough.
- Optional<ValueAndVReg> MaybeIdxVal =
+ std::optional<ValueAndVReg> MaybeIdxVal =
getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
if (!MaybeIdxVal) // Dynamic case will be selected to register indexing.
return true;
// FIXME: Artifact combiner probably should have replaced the truncated
// constant before this, so we shouldn't need
// getIConstantVRegValWithLookThrough.
- Optional<ValueAndVReg> MaybeIdxVal =
+ std::optional<ValueAndVReg> MaybeIdxVal =
getIConstantVRegValWithLookThrough(MI.getOperand(3).getReg(), MRI);
if (!MaybeIdxVal) // Dynamic case will be selected to register indexing.
return true;
MachineRegisterInfo &MRI,
MachineIRBuilder &B) const {
Function &F = B.getMF().getFunction();
- Optional<uint32_t> KnownSize =
+ std::optional<uint32_t> KnownSize =
AMDGPUMachineFunction::getLDSKernelIdMetadata(F);
if (KnownSize.has_value())
B.buildConstant(DstReg, KnownSize.value());
Register VOffset, Register SOffset,
unsigned ImmOffset, Register VIndex,
MachineRegisterInfo &MRI) const {
- Optional<ValueAndVReg> MaybeVOffsetVal =
+ std::optional<ValueAndVReg> MaybeVOffsetVal =
getIConstantVRegValWithLookThrough(VOffset, MRI);
- Optional<ValueAndVReg> MaybeSOffsetVal =
+ std::optional<ValueAndVReg> MaybeSOffsetVal =
getIConstantVRegValWithLookThrough(SOffset, MRI);
- Optional<ValueAndVReg> MaybeVIndexVal =
+ std::optional<ValueAndVReg> MaybeVIndexVal =
getIConstantVRegValWithLookThrough(VIndex, MRI);
// If the combined VOffset + SOffset + ImmOffset + strided VIndex is constant,
// update the MMO with that offset. The stride is unknown so we can only do
ST.getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbi::AMDHSA)
return legalizeTrapEndpgm(MI, MRI, B);
- if (Optional<uint8_t> HsaAbiVer = AMDGPU::getHsaAbiVersion(&ST)) {
+ if (std::optional<uint8_t> HsaAbiVer = AMDGPU::getHsaAbiVersion(&ST)) {
switch (*HsaAbiVer) {
case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
case ELF::ELFABIVERSION_AMDGPU_HSA_V3:
}
}
-Optional<uint32_t>
+std::optional<uint32_t>
AMDGPUMachineFunction::getLDSKernelIdMetadata(const Function &F) {
auto MD = F.getMetadata("llvm.amdgcn.lds.kernel.id");
if (MD && MD->getNumOperands() == 1) {
static bool isKnownAddressLDSGlobal(const GlobalVariable &GV);
static unsigned calculateKnownAddressOfLDSGlobal(const GlobalVariable &GV);
- static Optional<uint32_t> getLDSKernelIdMetadata(const Function &F);
+ static std::optional<uint32_t> getLDSKernelIdMetadata(const Function &F);
Align getDynLDSAlign() const { return DynLDSAlign; }
///
/// \returns \p SSID's inclusion ordering, or "std::nullopt" if \p SSID is not
/// supported by the AMDGPU target.
- Optional<uint8_t> getSyncScopeInclusionOrdering(SyncScope::ID SSID) const {
+ std::optional<uint8_t>
+ getSyncScopeInclusionOrdering(SyncScope::ID SSID) const {
if (SSID == SyncScope::SingleThread ||
SSID == getSingleThreadOneAddressSpaceSSID())
return 0;
/// synchronization scope \p B, false if synchronization scope \p A is smaller
/// than synchronization scope \p B, or "std::nullopt" if either
/// synchronization scope \p A or \p B is not supported by the AMDGPU target.
- Optional<bool> isSyncScopeInclusion(SyncScope::ID A, SyncScope::ID B) const {
+ std::optional<bool> isSyncScopeInclusion(SyncScope::ID A,
+ SyncScope::ID B) const {
const auto &AIO = getSyncScopeInclusionOrdering(A);
const auto &BIO = getSyncScopeInclusionOrdering(B);
if (!AIO || !BIO)
}
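isSyncScopeInclusion only produces an answer when both ordering lookups succeed; a single std::nullopt from either side propagates to the caller. Reduced to a sketch with hypothetical names:

    #include <optional>

    // Toy model: negative scopes are unsupported.
    std::optional<unsigned> orderingOf(int Scope) {
      if (Scope < 0)
        return std::nullopt;
      return static_cast<unsigned>(Scope);
    }

    // True if A includes B, false if it is smaller, nullopt if either is unknown.
    std::optional<bool> isInclusion(int A, int B) {
      const std::optional<unsigned> AO = orderingOf(A);
      const std::optional<unsigned> BO = orderingOf(B);
      if (!AO || !BO)
        return std::nullopt;
      return *AO >= *BO;
    }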
FeatureBitset Features;
- Optional<Attribute> Attributes[NumAttr];
+ std::optional<Attribute> Attributes[NumAttr];
};
class Clone {
void setFeatures(Function &F, const FeatureBitset &NewFeatures);
// Set new function's attributes in place.
- void setAttributes(Function &F, const ArrayRef<Optional<Attribute>> NewAttrs);
+ void setAttributes(Function &F,
+ const ArrayRef<std::optional<Attribute>> NewAttrs);
std::string getFeatureString(const FeatureBitset &Features) const;
F.addFnAttr("target-features", NewFeatureStr);
}
-void AMDGPUPropagateAttributes::setAttributes(Function &F,
- const ArrayRef<Optional<Attribute>> NewAttrs) {
+void AMDGPUPropagateAttributes::setAttributes(
+ Function &F, const ArrayRef<std::optional<Attribute>> NewAttrs) {
LLVM_DEBUG(dbgs() << "Set attributes on " << F.getName() << ":\n");
for (unsigned I = 0; I < NumAttr; ++I) {
F.removeFnAttr(AttributeNames[I]);
MinMaxMedOpc OpcodeTriple = getMinMaxPair(MI.getOpcode());
Register Val;
- Optional<ValueAndVReg> K0, K1;
+ std::optional<ValueAndVReg> K0, K1;
// Match min(max(Val, K0), K1) or max(min(Val, K1), K0). Then see if K0 <= K1.
if (!matchMed<GCstAndRegMatch>(MI, MRI, OpcodeTriple, Val, K0, K1))
return false;
auto OpcodeTriple = getMinMaxPair(MI.getOpcode());
Register Val;
- Optional<FPValueAndVReg> K0, K1;
+ std::optional<FPValueAndVReg> K0, K1;
// Match min(max(Val, K0), K1) or max(min(Val, K1), K0). Then see if K0 <= K1.
if (!matchMed<GFCstAndRegMatch>(MI, MRI, OpcodeTriple, Val, K0, K1))
return false;
// Clamp is available on all types after regbankselect (f16, f32, f64, v2f16).
auto OpcodeTriple = getMinMaxPair(MI.getOpcode());
Register Val;
- Optional<FPValueAndVReg> K0, K1;
+ std::optional<FPValueAndVReg> K0, K1;
// Match min(max(Val, K0), K1) or max(min(Val, K1), K0).
if (!matchMed<GFCstOrSplatGFCstMatch>(MI, MRI, OpcodeTriple, Val, K0, K1))
return false;
const LLT S32 = LLT::scalar(32);
MachineRegisterInfo *MRI = B.getMRI();
- if (Optional<int64_t> Imm = getIConstantVRegSExtVal(CombinedOffset, *MRI)) {
+ if (std::optional<int64_t> Imm =
+ getIConstantVRegSExtVal(CombinedOffset, *MRI)) {
uint32_t SOffset, ImmOffset;
if (AMDGPU::splitMUBUFOffset(*Imm, SOffset, ImmOffset, &RBI.Subtarget,
Alignment)) {
/// \param SGPRBlocks [out] Result SGPR block count.
bool calculateGPRBlocks(const FeatureBitset &Features, bool VCCUsed,
bool FlatScrUsed, bool XNACKUsed,
- Optional<bool> EnableWavefrontSize32, unsigned NextFreeVGPR,
- SMRange VGPRRange, unsigned NextFreeSGPR,
- SMRange SGPRRange, unsigned &VGPRBlocks,
- unsigned &SGPRBlocks);
+ std::optional<bool> EnableWavefrontSize32,
+ unsigned NextFreeVGPR, SMRange VGPRRange,
+ unsigned NextFreeSGPR, SMRange SGPRRange,
+ unsigned &VGPRBlocks, unsigned &SGPRBlocks);
bool ParseDirectiveAMDGCNTarget();
bool ParseDirectiveAMDHSAKernel();
bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
bool isRegister();
bool isRegister(const AsmToken &Token, const AsmToken &NextToken) const;
- Optional<StringRef> getGprCountSymbolName(RegisterKind RegKind);
+ std::optional<StringRef> getGprCountSymbolName(RegisterKind RegKind);
void initializeGprCountSymbol(RegisterKind RegKind);
bool updateGprCountSymbols(RegisterKind RegKind, unsigned DwordRegIndex,
unsigned RegWidth);
const SMLoc &IDLoc);
bool validateExeczVcczOperands(const OperandVector &Operands);
bool validateTFE(const MCInst &Inst, const OperandVector &Operands);
- Optional<StringRef> validateLdsDirect(const MCInst &Inst);
+ std::optional<StringRef> validateLdsDirect(const MCInst &Inst);
unsigned getConstantBusLimit(unsigned Opcode) const;
bool usesConstantBus(const MCInst &Inst, unsigned OpIdx);
bool isInlineConstant(const MCInst &Inst, unsigned OpIdx) const;
return false;
}
-Optional<StringRef>
+std::optional<StringRef>
AMDGPUAsmParser::getGprCountSymbolName(RegisterKind RegKind) {
switch (RegKind) {
case IS_VGPR:
}
}
-Optional<StringRef> AMDGPUAsmParser::validateLdsDirect(const MCInst &Inst) {
+std::optional<StringRef>
+AMDGPUAsmParser::validateLdsDirect(const MCInst &Inst) {
using namespace SIInstrFlags;
const unsigned Opcode = Inst.getOpcode();
bool AMDGPUAsmParser::calculateGPRBlocks(
const FeatureBitset &Features, bool VCCUsed, bool FlatScrUsed,
- bool XNACKUsed, Optional<bool> EnableWavefrontSize32, unsigned NextFreeVGPR,
- SMRange VGPRRange, unsigned NextFreeSGPR, SMRange SGPRRange,
- unsigned &VGPRBlocks, unsigned &SGPRBlocks) {
+ bool XNACKUsed, std::optional<bool> EnableWavefrontSize32,
+ unsigned NextFreeVGPR, SMRange VGPRRange, unsigned NextFreeSGPR,
+ SMRange SGPRRange, unsigned &VGPRBlocks, unsigned &SGPRBlocks) {
// TODO(scott.linder): These calculations are duplicated from
// AMDGPUAsmPrinter::getSIProgramInfo and could be unified.
IsaVersion Version = getIsaVersion(getSTI().getCPU());
std::optional<unsigned> ExplicitUserSGPRCount;
bool ReserveVCC = true;
bool ReserveFlatScr = true;
- Optional<bool> EnableWavefrontSize32;
+ std::optional<bool> EnableWavefrontSize32;
while (true) {
while (trySkipToken(AsmToken::EndOfStatement));
if (IVersion.Major >= 7 && !ReserveFlatScr && !hasArchitectedFlatScratch(STI))
OS << "\t\t.amdhsa_reserve_flat_scratch " << ReserveFlatScr << '\n';
- if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(&STI)) {
+ if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(&STI)) {
switch (*HsaAbiVer) {
default:
break;
unsigned AMDGPUTargetELFStreamer::getEFlagsAMDHSA() {
assert(STI.getTargetTriple().getOS() == Triple::AMDHSA);
- if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(&STI)) {
+ if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(&STI)) {
switch (*HsaAbiVer) {
case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
case ELF::ELFABIVERSION_AMDGPU_HSA_V3:
protected:
// TODO: Move HSAMetadataStream to AMDGPUTargetStreamer.
- Optional<AMDGPU::IsaInfo::AMDGPUTargetID> TargetID;
+ std::optional<AMDGPU::IsaInfo::AMDGPUTargetID> TargetID;
MCContext &getContext() const { return Streamer.getContext(); }
static StringRef getArchNameFromElfMach(unsigned ElfMach);
static unsigned getElfMach(StringRef GPU);
- const Optional<AMDGPU::IsaInfo::AMDGPUTargetID> &getTargetID() const {
+ const std::optional<AMDGPU::IsaInfo::AMDGPUTargetID> &getTargetID() const {
return TargetID;
}
- Optional<AMDGPU::IsaInfo::AMDGPUTargetID> &getTargetID() {
+ std::optional<AMDGPU::IsaInfo::AMDGPUTargetID> &getTargetID() {
return TargetID;
}
void initializeTargetID(const MCSubtargetInfo &STI) {
static void getVGPRSpillLaneOrTempRegister(MachineFunction &MF,
LivePhysRegs &LiveRegs,
Register &TempSGPR,
- Optional<int> &FrameIndex,
+ std::optional<int> &FrameIndex,
bool IsFP) {
SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
MachineFrameInfo &FrameInfo = MF.getFrameInfo();
// turn on all lanes before doing the spill to memory.
Register ScratchExecCopy;
- Optional<int> FPSaveIndex = FuncInfo->FramePointerSaveIndex;
- Optional<int> BPSaveIndex = FuncInfo->BasePointerSaveIndex;
+ std::optional<int> FPSaveIndex = FuncInfo->FramePointerSaveIndex;
+ std::optional<int> BPSaveIndex = FuncInfo->BasePointerSaveIndex;
// VGPRs used for SGPR->VGPR spills
for (const SIMachineFunctionInfo::SGPRSpillVGPR &Reg :
const Register BasePtrReg =
TRI.hasBasePointer(MF) ? TRI.getBaseRegister() : Register();
- Optional<int> FPSaveIndex = FuncInfo->FramePointerSaveIndex;
- Optional<int> BPSaveIndex = FuncInfo->BasePointerSaveIndex;
+ std::optional<int> FPSaveIndex = FuncInfo->FramePointerSaveIndex;
+ std::optional<int> BPSaveIndex = FuncInfo->BasePointerSaveIndex;
if (RoundedSize != 0 && hasFP(MF)) {
auto Add = BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_I32), StackPtrReg)
const SDLoc &SL) const {
Function &F = DAG.getMachineFunction().getFunction();
- Optional<uint32_t> KnownSize =
+ std::optional<uint32_t> KnownSize =
AMDGPUMachineFunction::getLDSKernelIdMetadata(F);
if (KnownSize.has_value())
return DAG.getConstant(KnownSize.value(), SL, MVT::i32);
// input for kernels, and is computed from the kernarg segment pointer.
InputReg = getImplicitArgPtr(DAG, DL);
} else if (InputID == AMDGPUFunctionArgInfo::LDS_KERNEL_ID) {
- Optional<uint32_t> Id = AMDGPUMachineFunction::getLDSKernelIdMetadata(F);
+ std::optional<uint32_t> Id =
+ AMDGPUMachineFunction::getLDSKernelIdMetadata(F);
if (Id.has_value()) {
InputReg = DAG.getConstant(Id.value(), DL, ArgVT);
} else {
Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbi::AMDHSA)
return lowerTrapEndpgm(Op, DAG);
- if (Optional<uint8_t> HsaAbiVer = AMDGPU::getHsaAbiVersion(Subtarget)) {
+ if (std::optional<uint8_t> HsaAbiVer = AMDGPU::getHsaAbiVersion(Subtarget)) {
switch (*HsaAbiVer) {
case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
case ELF::ELFABIVERSION_AMDGPU_HSA_V3:
if (Opcode == AMDGPU::G_FCANONICALIZE)
return true;
- Optional<FPValueAndVReg> FCR;
+ std::optional<FPValueAndVReg> FCR;
// Constant splat (can be padded with undef) or scalar constant.
if (mi_match(Reg, MRI, MIPatternMatch::m_GFCstOrSplat(FCR))) {
if (FCR->Value.isSignaling())
int32_t NewOffset) const;
Register computeBase(MachineInstr &MI, const MemAddress &Addr) const;
MachineOperand createRegOrImm(int32_t Val, MachineInstr &MI) const;
- Optional<int32_t> extractConstOffset(const MachineOperand &Op) const;
+ std::optional<int32_t> extractConstOffset(const MachineOperand &Op) const;
void processBaseWithConstOffset(const MachineOperand &Base, MemAddress &Addr) const;
/// Promotes constant offset to the immediate by adjusting the base. It
/// tries to use a base from the nearby instructions that allows it to have
TII->getNamedOperand(MI, AMDGPU::OpName::offset)->setImm(NewOffset);
}
-Optional<int32_t>
+std::optional<int32_t>
SILoadStoreOptimizer::extractConstOffset(const MachineOperand &Op) const {
if (Op.isImm())
return Op.getImm();
return false;
}
- Optional<int> SpillFI;
+ std::optional<int> SpillFI;
// We need to preserve inactive lanes, so always save, even caller-save
// registers.
if (!isEntryFunction()) {
// Current recorded maximum possible occupancy.
unsigned Occupancy;
- mutable Optional<bool> UsesAGPRs;
+ mutable std::optional<bool> UsesAGPRs;
MCPhysReg getNextUserSGPR() const;
// If the VGPR is used for SGPR spills in a non-entrypoint function, the
// stack slot used to save/restore it in the prolog/epilog.
- Optional<int> FI;
+ std::optional<int> FI;
- SGPRSpillVGPR(Register V, Optional<int> F) : VGPR(V), FI(F) {}
+ SGPRSpillVGPR(Register V, std::optional<int> F) : VGPR(V), FI(F) {}
};
struct VGPRSpillToAGPR {
// Emergency stack slot. Sometimes, we create this before finalizing the stack
// frame, so save it here and add it to the RegScavenger later.
- Optional<int> ScavengeFI;
+ std::optional<int> ScavengeFI;
private:
Register VGPRForAGPRCopy;
/// If this is set, an SGPR used for save/restore of the register used for the
/// frame pointer.
Register SGPRForFPSaveRestoreCopy;
- Optional<int> FramePointerSaveIndex;
+ std::optional<int> FramePointerSaveIndex;
/// If this is set, an SGPR used for save/restore of the register used for the
/// base pointer.
Register SGPRForBPSaveRestoreCopy;
- Optional<int> BasePointerSaveIndex;
+ std::optional<int> BasePointerSaveIndex;
bool isCalleeSavedReg(const MCPhysReg *CSRegs, MCPhysReg Reg);
bool ResetSGPRSpillStackIDs);
int getScavengeFI(MachineFrameInfo &MFI, const SIRegisterInfo &TRI);
- Optional<int> getOptionalScavengeFI() const { return ScavengeFI; }
+ std::optional<int> getOptionalScavengeFI() const { return ScavengeFI; }
unsigned getBytesInStackArgArea() const {
return BytesInStackArgArea;
/// the SI atomic scope it corresponds to, the address spaces it
/// covers, and whether the memory ordering applies between address
/// spaces.
- Optional<std::tuple<SIAtomicScope, SIAtomicAddrSpace, bool>>
+ std::optional<std::tuple<SIAtomicScope, SIAtomicAddrSpace, bool>>
toSIAtomicScope(SyncScope::ID SSID, SIAtomicAddrSpace InstrAddrSpace) const;
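Callers of toSIAtomicScope typically bail out on std::nullopt and otherwise unpack the tuple. A minimal sketch of that pattern, assuming SSID and InstrAddrSpace are in scope in the enclosing routine:

  SIAtomicScope Scope = SIAtomicScope::NONE;
  SIAtomicAddrSpace OrderingAddrSpace = SIAtomicAddrSpace::NONE;
  bool IsCrossAddressSpaceOrdering = false;
  if (auto ScopeOrNone = toSIAtomicScope(SSID, InstrAddrSpace))
    std::tie(Scope, OrderingAddrSpace, IsCrossAddressSpaceOrdering) = *ScopeOrNone;
  else
    return std::nullopt; // unsupported synchronization scope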
/// \return Return a bit set of the address spaces accessed by \p AS.
/// \returns Info constructed from \p MI, which has at least machine memory
/// operand.
- Optional<SIMemOpInfo> constructFromMIWithMMO(
- const MachineBasicBlock::iterator &MI) const;
+ std::optional<SIMemOpInfo>
+ constructFromMIWithMMO(const MachineBasicBlock::iterator &MI) const;
public:
/// Construct class to support accessing the machine memory operands
SIMemOpAccess(MachineFunction &MF);
/// \returns Load info if \p MI is a load operation, "std::nullopt" otherwise.
- Optional<SIMemOpInfo> getLoadInfo(
- const MachineBasicBlock::iterator &MI) const;
+ std::optional<SIMemOpInfo>
+ getLoadInfo(const MachineBasicBlock::iterator &MI) const;
/// \returns Store info if \p MI is a store operation, "std::nullopt"
/// otherwise.
- Optional<SIMemOpInfo> getStoreInfo(
- const MachineBasicBlock::iterator &MI) const;
+ std::optional<SIMemOpInfo>
+ getStoreInfo(const MachineBasicBlock::iterator &MI) const;
/// \returns Atomic fence info if \p MI is an atomic fence operation,
/// "std::nullopt" otherwise.
- Optional<SIMemOpInfo> getAtomicFenceInfo(
- const MachineBasicBlock::iterator &MI) const;
+ std::optional<SIMemOpInfo>
+ getAtomicFenceInfo(const MachineBasicBlock::iterator &MI) const;
/// \returns Atomic cmpxchg/rmw info if \p MI is an atomic cmpxchg or
/// rmw operation, "std::nullopt" otherwise.
- Optional<SIMemOpInfo> getAtomicCmpxchgOrRmwInfo(
- const MachineBasicBlock::iterator &MI) const;
+ std::optional<SIMemOpInfo>
+ getAtomicCmpxchgOrRmwInfo(const MachineBasicBlock::iterator &MI) const;
};
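A pass drives these accessors by testing and dereferencing the std::optional result in one step. A sketch of such a loop, assuming MBB, MOA (an SIMemOpAccess), and Changed are in scope; expandLoad/expandStore are hypothetical helpers:

  for (auto MI = MBB.begin(), E = MBB.end(); MI != E; ++MI) {
    if (!(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic))
      continue;
    if (std::optional<SIMemOpInfo> MOI = MOA.getLoadInfo(MI))
      Changed |= expandLoad(*MOI, MI);  // hypothetical helper
    else if (std::optional<SIMemOpInfo> MOI = MOA.getStoreInfo(MI))
      Changed |= expandStore(*MOI, MI); // hypothetical helper
  }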
class SICacheControl {
Func.getContext().diagnose(Diag);
}
-Optional<std::tuple<SIAtomicScope, SIAtomicAddrSpace, bool>>
+std::optional<std::tuple<SIAtomicScope, SIAtomicAddrSpace, bool>>
SIMemOpAccess::toSIAtomicScope(SyncScope::ID SSID,
SIAtomicAddrSpace InstrAddrSpace) const {
if (SSID == SyncScope::System)
MMI = &MF.getMMI().getObjFileInfo<AMDGPUMachineModuleInfo>();
}
-Optional<SIMemOpInfo> SIMemOpAccess::constructFromMIWithMMO(
+std::optional<SIMemOpInfo> SIMemOpAccess::constructFromMIWithMMO(
const MachineBasicBlock::iterator &MI) const {
assert(MI->getNumMemOperands() > 0);
IsNonTemporal);
}
-Optional<SIMemOpInfo> SIMemOpAccess::getLoadInfo(
- const MachineBasicBlock::iterator &MI) const {
+std::optional<SIMemOpInfo>
+SIMemOpAccess::getLoadInfo(const MachineBasicBlock::iterator &MI) const {
assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);
if (!(MI->mayLoad() && !MI->mayStore()))
return constructFromMIWithMMO(MI);
}
-Optional<SIMemOpInfo> SIMemOpAccess::getStoreInfo(
- const MachineBasicBlock::iterator &MI) const {
+std::optional<SIMemOpInfo>
+SIMemOpAccess::getStoreInfo(const MachineBasicBlock::iterator &MI) const {
assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);
if (!(!MI->mayLoad() && MI->mayStore()))
return constructFromMIWithMMO(MI);
}
-Optional<SIMemOpInfo> SIMemOpAccess::getAtomicFenceInfo(
- const MachineBasicBlock::iterator &MI) const {
+std::optional<SIMemOpInfo>
+SIMemOpAccess::getAtomicFenceInfo(const MachineBasicBlock::iterator &MI) const {
assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);
if (MI->getOpcode() != AMDGPU::ATOMIC_FENCE)
IsCrossAddressSpaceOrdering, AtomicOrdering::NotAtomic);
}
-Optional<SIMemOpInfo> SIMemOpAccess::getAtomicCmpxchgOrRmwInfo(
+std::optional<SIMemOpInfo> SIMemOpAccess::getAtomicCmpxchgOrRmwInfo(
const MachineBasicBlock::iterator &MI) const {
assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);
MapVector<MachineInstr *, SDWAOperandsVector> PotentialMatches;
SmallVector<MachineInstr *, 8> ConvertedInstructions;
- Optional<int64_t> foldToImm(const MachineOperand &Op) const;
+ std::optional<int64_t> foldToImm(const MachineOperand &Op) const;
public:
static char ID;
return SDWADstOperand::convertToSDWA(MI, TII);
}
-Optional<int64_t> SIPeepholeSDWA::foldToImm(const MachineOperand &Op) const {
+std::optional<int64_t>
+SIPeepholeSDWA::foldToImm(const MachineOperand &Op) const {
if (Op.isImm()) {
return Op.getImm();
}
namespace AMDGPU {
-Optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI) {
+std::optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI) {
if (STI && STI->getTargetTriple().getOS() != Triple::AMDHSA)
return std::nullopt;
}
bool isHsaAbiVersion2(const MCSubtargetInfo *STI) {
- if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
+ if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V2;
return false;
}
bool isHsaAbiVersion3(const MCSubtargetInfo *STI) {
- if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
+ if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V3;
return false;
}
bool isHsaAbiVersion4(const MCSubtargetInfo *STI) {
- if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
+ if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V4;
return false;
}
bool isHsaAbiVersion5(const MCSubtargetInfo *STI) {
- if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
+ if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V5;
return false;
}
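The four predicates above share one pattern: unwrap the optional ABI version and compare it against a specific ELFABIVERSION value. A hedged sketch of a single parameterized helper that could factor this out (not part of this change):

  // Hypothetical helper, shown for illustration only.
  static bool isHsaAbiVersion(const MCSubtargetInfo *STI, uint8_t Version) {
    if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
      return *HsaAbiVer == Version;
    return false;
  }
  // e.g. isHsaAbiVersion(STI, ELF::ELFABIVERSION_AMDGPU_HSA_V4)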
return 0;
}
-Optional<unsigned> InstInfo::getInvalidCompOperandIndex(
+std::optional<unsigned> InstInfo::getInvalidCompOperandIndex(
std::function<unsigned(unsigned, unsigned)> GetRegIdx) const {
auto OpXRegs = getRegIndices(ComponentIndex::X, GetRegIdx);
.str();
std::string Features;
- if (Optional<uint8_t> HsaAbiVersion = getHsaAbiVersion(&STI)) {
+ if (std::optional<uint8_t> HsaAbiVersion = getHsaAbiVersion(&STI)) {
switch (*HsaAbiVersion) {
case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
// Code object V2 only supported specific processors and had fixed
}
unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
- Optional<bool> EnableWavefrontSize32) {
+ std::optional<bool> EnableWavefrontSize32) {
if (STI->getFeatureBits().test(FeatureGFX90AInsts))
return 8;
}
unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI,
- Optional<bool> EnableWavefrontSize32) {
+ std::optional<bool> EnableWavefrontSize32) {
if (STI->getFeatureBits().test(FeatureGFX90AInsts))
return 8;
}
unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
- Optional<bool> EnableWavefrontSize32) {
+ std::optional<bool> EnableWavefrontSize32) {
NumVGPRs = alignTo(std::max(1u, NumVGPRs),
getVGPREncodingGranule(STI, EnableWavefrontSize32));
// VGPRBlocks is actual number of VGPR blocks minus 1.
return ByteOffset >> 2;
}
-Optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
- int64_t ByteOffset, bool IsBuffer) {
+std::optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
+ int64_t ByteOffset, bool IsBuffer) {
// The signed version is always a byte offset.
if (!IsBuffer && hasSMRDSignedImmOffset(ST)) {
assert(hasSMEMByteOffset(ST));
- return isInt<20>(ByteOffset) ? Optional<int64_t>(ByteOffset) : std::nullopt;
+ return isInt<20>(ByteOffset) ? std::optional<int64_t>(ByteOffset)
+ : std::nullopt;
}
if (!isDwordAligned(ByteOffset) && !hasSMEMByteOffset(ST))
int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
return isLegalSMRDEncodedUnsignedOffset(ST, EncodedOffset)
- ? Optional<int64_t>(EncodedOffset)
+ ? std::optional<int64_t>(EncodedOffset)
: std::nullopt;
}
-Optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
- int64_t ByteOffset) {
+std::optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
+ int64_t ByteOffset) {
if (!isCI(ST) || !isDwordAligned(ByteOffset))
return std::nullopt;
int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
- return isUInt<32>(EncodedOffset) ? Optional<int64_t>(EncodedOffset)
+ return isUInt<32>(EncodedOffset) ? std::optional<int64_t>(EncodedOffset)
: std::nullopt;
}
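The explicit std::optional<int64_t>(EncodedOffset) wrapping in the ternaries above is required because both arms of ?: must converge on a single type; a bare int64_t does not unify with std::nullopt_t. A small standalone illustration of the idiom (the 1024 limit is purely illustrative):

  #include <cstdint>
  #include <optional>

  std::optional<int64_t> encodeIfSmall(int64_t Offset) {
    // Both arms now have type std::optional<int64_t>.
    return Offset < 1024 ? std::optional<int64_t>(Offset) : std::nullopt;
  }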
struct IsaVersion;
/// \returns HSA OS ABI Version identification.
-Optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI);
+std::optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI);
/// \returns True if HSA OS ABI Version identification is 2,
/// false otherwise.
bool isHsaAbiVersion2(const MCSubtargetInfo *STI);
/// the ENABLE_WAVEFRONT_SIZE32 kernel descriptor field.
unsigned
getVGPRAllocGranule(const MCSubtargetInfo *STI,
- Optional<bool> EnableWavefrontSize32 = std::nullopt);
+ std::optional<bool> EnableWavefrontSize32 = std::nullopt);
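The defaulted std::optional<bool> makes EnableWavefrontSize32 a tri-state: callers can force wave32 on, force it off, or leave it unset and let the subtarget decide. A sketch of how an implementation typically resolves such a parameter; the feature-bit query shown is illustrative:

  // Resolve the tri-state std::optional<bool> parameter.
  bool IsWave32 = EnableWavefrontSize32
                      ? *EnableWavefrontSize32
                      : STI->getFeatureBits().test(FeatureWavefrontSize32);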
/// \returns VGPR encoding granularity for given subtarget \p STI.
///
/// For subtargets which support it, \p EnableWavefrontSize32 should match
/// the ENABLE_WAVEFRONT_SIZE32 kernel descriptor field.
-unsigned
-getVGPREncodingGranule(const MCSubtargetInfo *STI,
- Optional<bool> EnableWavefrontSize32 = std::nullopt);
+unsigned getVGPREncodingGranule(
+ const MCSubtargetInfo *STI,
+ std::optional<bool> EnableWavefrontSize32 = std::nullopt);
/// \returns Total number of VGPRs for given subtarget \p STI.
unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI);
///
/// For subtargets which support it, \p EnableWavefrontSize32 should match the
/// ENABLE_WAVEFRONT_SIZE32 kernel descriptor field.
-unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs,
- Optional<bool> EnableWavefrontSize32 = std::nullopt);
+unsigned
+getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs,
+ std::optional<bool> EnableWavefrontSize32 = std::nullopt);
} // end namespace IsaInfo
class ComponentProps {
private:
unsigned SrcOperandsNum = 0;
- Optional<unsigned> MandatoryLiteralIdx;
+ std::optional<unsigned> MandatoryLiteralIdx;
bool HasSrc2Acc = false;
public:
// Check VOPD operands constraints.
// Return the index of an invalid component operand, if any.
- Optional<unsigned> getInvalidCompOperandIndex(
+ std::optional<unsigned> getInvalidCompOperandIndex(
std::function<unsigned(unsigned, unsigned)> GetRegIdx) const;
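A user of this check acts only when the optional index is present. A brief sketch, where II is a VOPD InstInfo instance, GetRegIdx is supplied by the caller, and reportInvalidCompOperand is a hypothetical diagnostics hook:

  if (std::optional<unsigned> InvalidCompOprIdx =
          II.getInvalidCompOperandIndex(GetRegIdx))
    reportInvalidCompOperand(*InvalidCompOprIdx); // hypothetical error reporting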
private:
/// SMRD offset field, or std::nullopt if it won't fit. On GFX9 and GFX10
/// S_LOAD instructions have a signed offset, on other subtargets it is
/// unsigned. S_BUFFER has an unsigned offset for all subtargets.
-Optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
- int64_t ByteOffset, bool IsBuffer);
+std::optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
+ int64_t ByteOffset, bool IsBuffer);
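A caller checks the optional result and falls back when the offset cannot be encoded. A minimal sketch of that pattern during instruction selection, assuming ST, ByteOffset, CurDAG, SL, and Offset come from the enclosing selection routine:

  if (std::optional<int64_t> EncodedOffset =
          AMDGPU::getSMRDEncodedOffset(ST, ByteOffset, /*IsBuffer=*/false)) {
    Offset = CurDAG->getTargetConstant(*EncodedOffset, SL, MVT::i32);
    return true;
  }
  return false; // offset does not fit the SMRD immediate field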
/// \return The encoding that can be used for a 32-bit literal offset in an SMRD
/// instruction. This is only useful on CI.
-Optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
- int64_t ByteOffset);
+std::optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
+ int64_t ByteOffset);
/// For FLAT segment the offset must be positive;
/// MSB is ignored and forced to zero.
auto MIBFCst2 = B.buildFConstant(s32, 2.0);
// Test G_ADD folding Integer + Mixed Int-Float cases
- Optional<APInt> FoldGAddInt =
- ConstantFoldBinOp(TargetOpcode::G_ADD, MIBCst1.getReg(0),
- MIBCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGAddInt = ConstantFoldBinOp(
+ TargetOpcode::G_ADD, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGAddInt.has_value());
EXPECT_EQ(25ULL, FoldGAddInt.value().getLimitedValue());
- Optional<APInt> FoldGAddMix =
- ConstantFoldBinOp(TargetOpcode::G_ADD, MIBCst1.getReg(0),
- MIBFCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGAddMix = ConstantFoldBinOp(
+ TargetOpcode::G_ADD, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGAddMix.has_value());
EXPECT_EQ(1073741840ULL, FoldGAddMix.value().getLimitedValue());
// Test G_AND folding Integer + Mixed Int-Float cases
- Optional<APInt> FoldGAndInt =
- ConstantFoldBinOp(TargetOpcode::G_AND, MIBCst1.getReg(0),
- MIBCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGAndInt = ConstantFoldBinOp(
+ TargetOpcode::G_AND, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGAndInt.has_value());
EXPECT_EQ(0ULL, FoldGAndInt.value().getLimitedValue());
- Optional<APInt> FoldGAndMix =
- ConstantFoldBinOp(TargetOpcode::G_AND, MIBCst2.getReg(0),
- MIBFCst1.getReg(0), *MRI);
+ std::optional<APInt> FoldGAndMix = ConstantFoldBinOp(
+ TargetOpcode::G_AND, MIBCst2.getReg(0), MIBFCst1.getReg(0), *MRI);
EXPECT_TRUE(FoldGAndMix.has_value());
EXPECT_EQ(1ULL, FoldGAndMix.value().getLimitedValue());
// Test G_ASHR folding Integer + Mixed cases
- Optional<APInt> FoldGAShrInt =
- ConstantFoldBinOp(TargetOpcode::G_ASHR, MIBCst1.getReg(0),
- MIBCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGAShrInt = ConstantFoldBinOp(
+ TargetOpcode::G_ASHR, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGAShrInt.has_value());
EXPECT_EQ(0ULL, FoldGAShrInt.value().getLimitedValue());
- Optional<APInt> FoldGAShrMix =
- ConstantFoldBinOp(TargetOpcode::G_ASHR, MIBFCst2.getReg(0),
- MIBCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGAShrMix = ConstantFoldBinOp(
+ TargetOpcode::G_ASHR, MIBFCst2.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGAShrMix.has_value());
EXPECT_EQ(2097152ULL, FoldGAShrMix.value().getLimitedValue());
// Test G_LSHR folding Integer + Mixed Int-Float cases
- Optional<APInt> FoldGLShrInt =
- ConstantFoldBinOp(TargetOpcode::G_LSHR, MIBCst1.getReg(0),
- MIBCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGLShrInt = ConstantFoldBinOp(
+ TargetOpcode::G_LSHR, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGLShrInt.has_value());
EXPECT_EQ(0ULL, FoldGLShrInt.value().getLimitedValue());
- Optional<APInt> FoldGLShrMix =
- ConstantFoldBinOp(TargetOpcode::G_LSHR, MIBFCst1.getReg(0),
- MIBCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGLShrMix = ConstantFoldBinOp(
+ TargetOpcode::G_LSHR, MIBFCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGLShrMix.has_value());
EXPECT_EQ(2080768ULL, FoldGLShrMix.value().getLimitedValue());
// Test G_MUL folding Integer + Mixed Int-Float cases
- Optional<APInt> FoldGMulInt =
- ConstantFoldBinOp(TargetOpcode::G_MUL, MIBCst1.getReg(0),
- MIBCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGMulInt = ConstantFoldBinOp(
+ TargetOpcode::G_MUL, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGMulInt.has_value());
EXPECT_EQ(144ULL, FoldGMulInt.value().getLimitedValue());
- Optional<APInt> FoldGMulMix =
- ConstantFoldBinOp(TargetOpcode::G_MUL, MIBCst1.getReg(0),
- MIBFCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGMulMix = ConstantFoldBinOp(
+ TargetOpcode::G_MUL, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGMulMix.has_value());
EXPECT_EQ(0ULL, FoldGMulMix.value().getLimitedValue());
// Test G_OR folding Integer + Mixed Int-Float cases
- Optional<APInt> FoldGOrInt =
- ConstantFoldBinOp(TargetOpcode::G_OR, MIBCst1.getReg(0),
- MIBCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGOrInt = ConstantFoldBinOp(
+ TargetOpcode::G_OR, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGOrInt.has_value());
EXPECT_EQ(25ULL, FoldGOrInt.value().getLimitedValue());
- Optional<APInt> FoldGOrMix =
- ConstantFoldBinOp(TargetOpcode::G_OR, MIBCst1.getReg(0),
- MIBFCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGOrMix = ConstantFoldBinOp(
+ TargetOpcode::G_OR, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGOrMix.has_value());
EXPECT_EQ(1073741840ULL, FoldGOrMix.value().getLimitedValue());
// Test G_SHL folding Integer + Mixed Int-Float cases
- Optional<APInt> FoldGShlInt =
- ConstantFoldBinOp(TargetOpcode::G_SHL, MIBCst1.getReg(0),
- MIBCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGShlInt = ConstantFoldBinOp(
+ TargetOpcode::G_SHL, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGShlInt.has_value());
EXPECT_EQ(8192ULL, FoldGShlInt.value().getLimitedValue());
- Optional<APInt> FoldGShlMix =
- ConstantFoldBinOp(TargetOpcode::G_SHL, MIBCst1.getReg(0),
- MIBFCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGShlMix = ConstantFoldBinOp(
+ TargetOpcode::G_SHL, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGShlMix.has_value());
EXPECT_EQ(0ULL, FoldGShlMix.value().getLimitedValue());
// Test G_SUB folding Integer + Mixed Int-Float cases
- Optional<APInt> FoldGSubInt =
- ConstantFoldBinOp(TargetOpcode::G_SUB, MIBCst1.getReg(0),
- MIBCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGSubInt = ConstantFoldBinOp(
+ TargetOpcode::G_SUB, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGSubInt.has_value());
EXPECT_EQ(7ULL, FoldGSubInt.value().getLimitedValue());
- Optional<APInt> FoldGSubMix =
- ConstantFoldBinOp(TargetOpcode::G_SUB, MIBCst1.getReg(0),
- MIBFCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGSubMix = ConstantFoldBinOp(
+ TargetOpcode::G_SUB, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGSubMix.has_value());
EXPECT_EQ(3221225488ULL, FoldGSubMix.value().getLimitedValue());
// Test G_XOR folding Integer + Mixed Int-Float cases
- Optional<APInt> FoldGXorInt =
- ConstantFoldBinOp(TargetOpcode::G_XOR, MIBCst1.getReg(0),
- MIBCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGXorInt = ConstantFoldBinOp(
+ TargetOpcode::G_XOR, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGXorInt.has_value());
EXPECT_EQ(25ULL, FoldGXorInt.value().getLimitedValue());
- Optional<APInt> FoldGXorMix =
- ConstantFoldBinOp(TargetOpcode::G_XOR, MIBCst1.getReg(0),
- MIBFCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGXorMix = ConstantFoldBinOp(
+ TargetOpcode::G_XOR, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGXorMix.has_value());
EXPECT_EQ(1073741840ULL, FoldGXorMix.value().getLimitedValue());
// Test G_UDIV folding Integer + Mixed Int-Float cases
- Optional<APInt> FoldGUdivInt =
- ConstantFoldBinOp(TargetOpcode::G_UDIV, MIBCst1.getReg(0),
- MIBCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGUdivInt = ConstantFoldBinOp(
+ TargetOpcode::G_UDIV, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGUdivInt.has_value());
EXPECT_EQ(1ULL, FoldGUdivInt.value().getLimitedValue());
- Optional<APInt> FoldGUdivMix =
- ConstantFoldBinOp(TargetOpcode::G_UDIV, MIBCst1.getReg(0),
- MIBFCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGUdivMix = ConstantFoldBinOp(
+ TargetOpcode::G_UDIV, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGUdivMix.has_value());
EXPECT_EQ(0ULL, FoldGUdivMix.value().getLimitedValue());
// Test G_SDIV folding Integer + Mixed Int-Float cases
- Optional<APInt> FoldGSdivInt =
- ConstantFoldBinOp(TargetOpcode::G_SDIV, MIBCst1.getReg(0),
- MIBCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGSdivInt = ConstantFoldBinOp(
+ TargetOpcode::G_SDIV, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGSdivInt.has_value());
EXPECT_EQ(1ULL, FoldGSdivInt.value().getLimitedValue());
- Optional<APInt> FoldGSdivMix =
- ConstantFoldBinOp(TargetOpcode::G_SDIV, MIBCst1.getReg(0),
- MIBFCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGSdivMix = ConstantFoldBinOp(
+ TargetOpcode::G_SDIV, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGSdivMix.has_value());
EXPECT_EQ(0ULL, FoldGSdivMix.value().getLimitedValue());
// Test G_UREM folding Integer + Mixed Int-Float cases
- Optional<APInt> FoldGUremInt =
- ConstantFoldBinOp(TargetOpcode::G_UDIV, MIBCst1.getReg(0),
- MIBCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGUremInt = ConstantFoldBinOp(
+ TargetOpcode::G_UDIV, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGUremInt.has_value());
EXPECT_EQ(1ULL, FoldGUremInt.value().getLimitedValue());
- Optional<APInt> FoldGUremMix =
- ConstantFoldBinOp(TargetOpcode::G_UDIV, MIBCst1.getReg(0),
- MIBFCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGUremMix = ConstantFoldBinOp(
+ TargetOpcode::G_UDIV, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGUremMix.has_value());
EXPECT_EQ(0ULL, FoldGUremMix.value().getLimitedValue());
// Test G_SREM folding Integer + Mixed Int-Float cases
- Optional<APInt> FoldGSremInt =
- ConstantFoldBinOp(TargetOpcode::G_SREM, MIBCst1.getReg(0),
- MIBCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGSremInt = ConstantFoldBinOp(
+ TargetOpcode::G_SREM, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGSremInt.has_value());
EXPECT_EQ(7ULL, FoldGSremInt.value().getLimitedValue());
- Optional<APInt> FoldGSremMix =
- ConstantFoldBinOp(TargetOpcode::G_SREM, MIBCst1.getReg(0),
- MIBFCst2.getReg(0), *MRI);
+ std::optional<APInt> FoldGSremMix = ConstantFoldBinOp(
+ TargetOpcode::G_SREM, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGSremMix.has_value());
EXPECT_EQ(16ULL, FoldGSremMix.value().getLimitedValue());
}
if (!TM)
return;
auto MIBCst = B.buildConstant(LLT::scalar(64), 42);
- Optional<ValueAndVReg> Src0;
+ std::optional<ValueAndVReg> Src0;
bool match = mi_match(MIBCst.getReg(0), *MRI, m_GCst(Src0));
EXPECT_TRUE(match);
EXPECT_EQ(Src0->VReg, MIBCst.getReg(0));
Register IntOne = B.buildConstant(LLT::scalar(64), 1).getReg(0);
Register FPOne = B.buildFConstant(LLT::scalar(64), 1.0).getReg(0);
- Optional<ValueAndVReg> ValReg;
- Optional<FPValueAndVReg> FValReg;
+ std::optional<ValueAndVReg> ValReg;
+ std::optional<FPValueAndVReg> FValReg;
EXPECT_TRUE(mi_match(IntOne, *MRI, m_GCst(ValReg)));
EXPECT_EQ(IntOne, ValReg->VReg);
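A complementary check for the floating-point constant could bind through m_GFCst into the std::optional<FPValueAndVReg> declared above. A sketch of what that assertion could look like:

  EXPECT_TRUE(mi_match(FPOne, *MRI, m_GFCst(FValReg)));
  EXPECT_EQ(FPOne, FValReg->VReg);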
Register FPOne = B.buildFConstant(s64, 1.0).getReg(0);
Register FPZero = B.buildFConstant(s64, 0.0).getReg(0);
Register Undef = B.buildUndef(s64).getReg(0);
- Optional<FPValueAndVReg> FValReg;
+ std::optional<FPValueAndVReg> FValReg;
// GFCstOrSplatGFCstMatch allows undef as part of splat. Undef often comes
  // from padding used to legalize into an available operation, then ignoring the added
SDValue Index = DAG->getMemBasePlusOffset(FIPtr, Offset, Loc);
SDValue Store = DAG->getStore(DAG->getEntryNode(), Loc, Value, Index,
PtrInfo.getWithOffset(Offset));
- Optional<int64_t> NumBytes = MemoryLocation::getSizeOrUnknown(
+ std::optional<int64_t> NumBytes = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(Store)->getMemoryVT().getStoreSize());
bool IsAlias;
// Maybe unlikely that BaseIndexOffset::computeAliasing is used with the
// optional NumBytes being unset like in this test, but it would be confusing
// if that function determined IsAlias=false here.
- Optional<int64_t> NumBytes;
+ std::optional<int64_t> NumBytes;
bool IsAlias;
bool IsValid = BaseIndexOffset::computeAliasing(
PtrInfo.getWithOffset(Offset0));
SDValue Store1 = DAG->getStore(DAG->getEntryNode(), Loc, Value, Index1,
PtrInfo.getWithOffset(Offset1));
- Optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
+ std::optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize());
- Optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
+ std::optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize());
bool IsAlias;
DAG->getStore(DAG->getEntryNode(), Loc, Value, FIPtr, PtrInfo);
SDValue Store1 = DAG->getStore(DAG->getEntryNode(), Loc, Value, Index1,
MachinePointerInfo(PtrInfo.getAddrSpace()));
- Optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
+ std::optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize());
- Optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
+ std::optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize());
bool IsAlias;
SDValue Index = DAG->getMemBasePlusOffset(FIPtr, Offset, Loc);
SDValue Store = DAG->getStore(DAG->getEntryNode(), Loc, Value, Index,
PtrInfo.getWithOffset(Offset));
- Optional<int64_t> NumBytes = MemoryLocation::getSizeOrUnknown(
+ std::optional<int64_t> NumBytes = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(Store)->getMemoryVT().getStoreSize());
EVT GTy = DAG->getTargetLoweringInfo().getValueType(DAG->getDataLayout(),
G->getType());
SDValue GAddr = DAG->getGlobalAddress(G, Loc, GTy);
SDValue GStore = DAG->getStore(DAG->getEntryNode(), Loc, GValue, GAddr,
MachinePointerInfo(G, 0));
- Optional<int64_t> GNumBytes = MemoryLocation::getSizeOrUnknown(
+ std::optional<int64_t> GNumBytes = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(GStore)->getMemoryVT().getStoreSize());
bool IsAlias;
SDValue GAddr = DAG->getGlobalAddress(G, Loc, GTy);
SDValue GStore = DAG->getStore(DAG->getEntryNode(), Loc, GValue, GAddr,
MachinePointerInfo(G, 0));
- Optional<int64_t> GNumBytes = MemoryLocation::getSizeOrUnknown(
+ std::optional<int64_t> GNumBytes = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(GStore)->getMemoryVT().getStoreSize());
SDValue AliasedGValue = DAG->getConstant(1, Loc, GTy);
PtrInfo.getWithOffset(Offset0));
SDValue Store1 = DAG->getStore(DAG->getEntryNode(), Loc, Value1, Index1,
PtrInfo.getWithOffset(Offset1));
- Optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
+ std::optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize());
- Optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
+ std::optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize());
bool IsAlias;
PtrInfo.getWithOffset(Offset0));
SDValue Store1 = DAG->getStore(DAG->getEntryNode(), Loc, Value1, Index1,
PtrInfo.getWithOffset(Offset1));
- Optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
+ std::optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize());
- Optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
+ std::optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize());
bool IsAlias;
PtrInfo0.getWithOffset(Offset0));
SDValue Store1 = DAG->getStore(DAG->getEntryNode(), Loc, Value1, Index1,
PtrInfo1.getWithOffset(Offset0));
- Optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
+ std::optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize());
- Optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
+ std::optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize());
bool IsAlias;