The updated functions are mostly internal with a few exceptions (virtual functions in
TargetInstrInfo.h, TargetRegisterInfo.h).
To minimize changes to LLVMCodeGen, GlobalISel files are skipped.
https://discourse.llvm.org/t/deprecating-llvm-optional-x-hasvalue-getvalue-getvalueor/63716
#include <vector>
namespace llvm {
-
-template <typename T, typename = decltype(std::declval<raw_ostream &>()
- << std::declval<const T &>())>
-raw_ostream &operator<<(raw_ostream &OS, const std::optional<T> &O) {
- if (O)
- OS << *O;
- else
- OS << std::nullopt;
- return OS;
-}
-
namespace bolt {
// NOTE: using SmallVector for instruction list results in a memory regression.
#include "llvm/Support/type_traits.h"
#include <cassert>
#include <new>
+#include <optional>
#include <utility>
namespace llvm {
return OS;
}
+template <typename T, typename = decltype(std::declval<raw_ostream &>()
+ << std::declval<const T &>())>
+raw_ostream &operator<<(raw_ostream &OS, const std::optional<T> &O) {
+ if (O)
+ OS << *O;
+ else
+ OS << std::nullopt;
+ return OS;
+}
+
} // end namespace llvm
#endif // LLVM_ADT_OPTIONAL_H
///
/// Note: This hook is guaranteed to be called from the innermost to the
/// outermost prologue of the loop being software pipelined.
- virtual Optional<bool>
+ virtual std::optional<bool>
createTripCountGreaterCondition(int TC, MachineBasicBlock &MBB,
SmallVectorImpl<MachineOperand> &Cond) = 0;
/// If the specific machine instruction is an instruction that moves/copies
/// value from one register to another register return destination and source
/// registers as machine operands.
- virtual Optional<DestSourcePair>
+ virtual std::optional<DestSourcePair>
isCopyInstrImpl(const MachineInstr &MI) const {
return std::nullopt;
}
/// For COPY-instruction the method naturally returns destination and source
/// registers as machine operands, for all other instructions the method calls
/// target-dependent implementation.
- Optional<DestSourcePair> isCopyInstr(const MachineInstr &MI) const {
+ std::optional<DestSourcePair> isCopyInstr(const MachineInstr &MI) const {
if (MI.isCopy()) {
return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
}
/// immediate value and a physical register, and stores the result in
/// the given physical register \c Reg, return a pair of the source
/// register and the offset which has been added.
- virtual Optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
- Register Reg) const {
+ virtual std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
+ Register Reg) const {
return std::nullopt;
}
/// MachineInstr that is accessing memory. These values are returned as a
/// struct ExtAddrMode which contains all relevant information to make up the
/// address.
- virtual Optional<ExtAddrMode>
+ virtual std::optional<ExtAddrMode>
getAddrModeFromMemoryOp(const MachineInstr &MemI,
const TargetRegisterInfo *TRI) const {
return std::nullopt;
/// Produce the expression describing the \p MI loading a value into
/// the physical register \p Reg. This hook should only be used with
/// \p MIs belonging to VReg-less functions.
- virtual Optional<ParamLoadedValue> describeLoadedValue(const MachineInstr &MI,
- Register Reg) const;
+ virtual std::optional<ParamLoadedValue>
+ describeLoadedValue(const MachineInstr &MI, Register Reg) const;
/// Given the generic extension instruction \p ExtMI, returns true if this
/// extension is a likely candidate for being folded into an another
/// The absence of an explanation does not mean that the register is not
/// reserved (meaning, you should check that PhysReg is in fact reserved
/// before calling this).
- virtual llvm::Optional<std::string>
+ virtual std::optional<std::string>
explainReservedReg(const MachineFunction &MF, MCRegister PhysReg) const {
return {};
}
DiagnosticInfoInlineAsm(LocCookie, Note, DiagnosticSeverity::DS_Note));
for (const Register RR : RestrRegs) {
- if (llvm::Optional<std::string> reason =
+ if (std::optional<std::string> reason =
TRI->explainReservedReg(*MF, RR)) {
MMI->getModule()->getContext().diagnose(DiagnosticInfoInlineAsm(
LocCookie, *reason, DiagnosticSeverity::DS_Note));
namespace {
-static Optional<DestSourcePair> isCopyInstr(const MachineInstr &MI,
- const TargetInstrInfo &TII,
- bool UseCopyInstr) {
+static std::optional<DestSourcePair> isCopyInstr(const MachineInstr &MI,
+ const TargetInstrInfo &TII,
+ bool UseCopyInstr) {
if (UseCopyInstr)
return TII.isCopyInstr(MI);
if (MI.isCopy())
- return Optional<DestSourcePair>(
+ return std::optional<DestSourcePair>(
DestSourcePair{MI.getOperand(0), MI.getOperand(1)});
return std::nullopt;
auto I = Copies.find(*RUI);
if (I != Copies.end()) {
if (MachineInstr *MI = I->second.MI) {
- Optional<DestSourcePair> CopyOperands =
+ std::optional<DestSourcePair> CopyOperands =
isCopyInstr(*MI, TII, UseCopyInstr);
assert(CopyOperands && "Expect copy");
// When we clobber the destination of a copy, we need to clobber the
// whole register it defined.
if (MachineInstr *MI = I->second.MI) {
- Optional<DestSourcePair> CopyOperands =
+ std::optional<DestSourcePair> CopyOperands =
isCopyInstr(*MI, TII, UseCopyInstr);
markRegsUnavailable({CopyOperands->Destination->getReg().asMCReg()},
TRI);
/// Add this copy's registers into the tracker's copy maps.
void trackCopy(MachineInstr *MI, const TargetRegisterInfo &TRI,
const TargetInstrInfo &TII, bool UseCopyInstr) {
- Optional<DestSourcePair> CopyOperands = isCopyInstr(*MI, TII, UseCopyInstr);
+ std::optional<DestSourcePair> CopyOperands =
+ isCopyInstr(*MI, TII, UseCopyInstr);
assert(CopyOperands && "Tracking non-copy?");
MCRegister Src = CopyOperands->Source->getReg().asMCReg();
if (!AvailCopy)
return nullptr;
- Optional<DestSourcePair> CopyOperands =
+ std::optional<DestSourcePair> CopyOperands =
isCopyInstr(*AvailCopy, TII, UseCopyInstr);
Register AvailSrc = CopyOperands->Source->getReg();
Register AvailDef = CopyOperands->Destination->getReg();
if (!AvailCopy)
return nullptr;
- Optional<DestSourcePair> CopyOperands =
+ std::optional<DestSourcePair> CopyOperands =
isCopyInstr(*AvailCopy, TII, UseCopyInstr);
Register AvailSrc = CopyOperands->Source->getReg();
Register AvailDef = CopyOperands->Destination->getReg();
MCRegister Def, const TargetRegisterInfo *TRI,
const TargetInstrInfo *TII, bool UseCopyInstr) {
- Optional<DestSourcePair> CopyOperands =
+ std::optional<DestSourcePair> CopyOperands =
isCopyInstr(PreviousCopy, *TII, UseCopyInstr);
MCRegister PreviousSrc = CopyOperands->Source->getReg().asMCReg();
MCRegister PreviousDef = CopyOperands->Destination->getReg().asMCReg();
// Copy was redundantly redefining either Src or Def. Remove earlier kill
// flags between Copy and PrevCopy because the value will be reused now.
- Optional<DestSourcePair> CopyOperands = isCopyInstr(Copy, *TII, UseCopyInstr);
+ std::optional<DestSourcePair> CopyOperands =
+ isCopyInstr(Copy, *TII, UseCopyInstr);
assert(CopyOperands);
Register CopyDef = CopyOperands->Destination->getReg();
bool MachineCopyPropagation::isBackwardPropagatableRegClassCopy(
const MachineInstr &Copy, const MachineInstr &UseI, unsigned UseIdx) {
-
- Optional<DestSourcePair> CopyOperands = isCopyInstr(Copy, *TII, UseCopyInstr);
+ std::optional<DestSourcePair> CopyOperands =
+ isCopyInstr(Copy, *TII, UseCopyInstr);
Register Def = CopyOperands->Destination->getReg();
if (const TargetRegisterClass *URC =
bool MachineCopyPropagation::isForwardableRegClassCopy(const MachineInstr &Copy,
const MachineInstr &UseI,
unsigned UseIdx) {
-
- Optional<DestSourcePair> CopyOperands = isCopyInstr(Copy, *TII, UseCopyInstr);
+ std::optional<DestSourcePair> CopyOperands =
+ isCopyInstr(Copy, *TII, UseCopyInstr);
Register CopySrcReg = CopyOperands->Source->getReg();
// If the new register meets the opcode register constraints, then allow
if (!Copy)
continue;
- Optional<DestSourcePair> CopyOperands =
+ std::optional<DestSourcePair> CopyOperands =
isCopyInstr(*Copy, *TII, UseCopyInstr);
Register CopyDstReg = CopyOperands->Destination->getReg();
const MachineOperand &CopySrc = *CopyOperands->Source;
for (MachineInstr &MI : llvm::make_early_inc_range(MBB)) {
// Analyze copies (which don't overlap themselves).
- Optional<DestSourcePair> CopyOperands = isCopyInstr(MI, *TII, UseCopyInstr);
+ std::optional<DestSourcePair> CopyOperands =
+ isCopyInstr(MI, *TII, UseCopyInstr);
if (CopyOperands) {
Register RegSrc = CopyOperands->Source->getReg();
MaybeDeadCopies.begin();
DI != MaybeDeadCopies.end();) {
MachineInstr *MaybeDead = *DI;
- Optional<DestSourcePair> CopyOperands =
+ std::optional<DestSourcePair> CopyOperands =
isCopyInstr(*MaybeDead, *TII, UseCopyInstr);
MCRegister Reg = CopyOperands->Destination->getReg().asMCReg();
assert(!MRI->isReserved(Reg));
LLVM_DEBUG(dbgs() << "MCP: Removing copy due to no live-out succ: ";
MaybeDead->dump());
- Optional<DestSourcePair> CopyOperands =
+ std::optional<DestSourcePair> CopyOperands =
isCopyInstr(*MaybeDead, *TII, UseCopyInstr);
assert(CopyOperands);
const MachineRegisterInfo &MRI,
const TargetInstrInfo &TII,
bool UseCopyInstr) {
- Optional<DestSourcePair> CopyOperands = isCopyInstr(MI, TII, UseCopyInstr);
+ std::optional<DestSourcePair> CopyOperands =
+ isCopyInstr(MI, TII, UseCopyInstr);
assert(CopyOperands && "MI is expected to be a COPY");
Register Def = CopyOperands->Destination->getReg();
if (!Copy)
continue;
- Optional<DestSourcePair> CopyOperands =
+ std::optional<DestSourcePair> CopyOperands =
isCopyInstr(*Copy, *TII, UseCopyInstr);
Register Def = CopyOperands->Destination->getReg();
Register Src = CopyOperands->Source->getReg();
for (MachineInstr &MI : llvm::make_early_inc_range(llvm::reverse(MBB))) {
// Ignore non-trivial COPYs.
- Optional<DestSourcePair> CopyOperands = isCopyInstr(MI, *TII, UseCopyInstr);
+ std::optional<DestSourcePair> CopyOperands =
+ isCopyInstr(MI, *TII, UseCopyInstr);
if (CopyOperands && MI.getNumOperands() == 2) {
Register DefReg = CopyOperands->Destination->getReg();
Register SrcReg = CopyOperands->Source->getReg();
}
for (auto *Copy : MaybeDeadCopies) {
-
- Optional<DestSourcePair> CopyOperands =
+ std::optional<DestSourcePair> CopyOperands =
isCopyInstr(*Copy, *TII, UseCopyInstr);
Register Src = CopyOperands->Source->getReg();
Register Def = CopyOperands->Destination->getReg();
MachineBasicBlock *Epilog = EpilogBBs[i];
SmallVector<MachineOperand, 4> Cond;
- Optional<bool> StaticallyGreater =
+ std::optional<bool> StaticallyGreater =
LoopInfo->createTripCountGreaterCondition(j + 1, *Prolog, Cond);
unsigned numAdded = 0;
if (!StaticallyGreater) {
MachineBasicBlock *Epilog = *EI;
SmallVector<MachineOperand, 4> Cond;
TII->removeBranch(*Prolog);
- Optional<bool> StaticallyGreater =
+ std::optional<bool> StaticallyGreater =
LoopInfo->createTripCountGreaterCondition(TC, *Prolog, Cond);
if (!StaticallyGreater) {
LLVM_DEBUG(dbgs() << "Dynamic: TC > " << TC << "\n");
return (DefCycle != -1 && DefCycle <= 1);
}
-Optional<ParamLoadedValue>
+std::optional<ParamLoadedValue>
TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
Register Reg) const {
const MachineFunction *MF = MI.getMF();
void AArch64AsmPrinter::emitFunctionHeaderComment() {
const AArch64FunctionInfo *FI = MF->getInfo<AArch64FunctionInfo>();
- Optional<std::string> OutlinerString = FI->getOutliningStyle();
+ std::optional<std::string> OutlinerString = FI->getOutliningStyle();
if (OutlinerString != std::nullopt)
OutStreamer->getCommentOS() << ' ' << OutlinerString;
}
  /// Returns the size in instructions of the block \p MBB, or std::nullopt
  /// if we couldn't get a safe upper bound.
- Optional<int> computeBlockSize(MachineBasicBlock &MBB);
+ std::optional<int> computeBlockSize(MachineBasicBlock &MBB);
/// Gather information about the function, returns false if we can't perform
/// this optimization for some reason.
INITIALIZE_PASS(AArch64CompressJumpTables, DEBUG_TYPE,
"AArch64 compress jump tables pass", false, false)
-Optional<int>
+std::optional<int>
AArch64CompressJumpTables::computeBlockSize(MachineBasicBlock &MBB) {
int Size = 0;
for (const MachineInstr &MI : MBB) {
return (Dest & 0xf) | ((Base & 0xf) << 4) | ((Offset & 0x3f) << 8);
}
-static Optional<LoadInfo> getLoadInfo(const MachineInstr &MI) {
+static std::optional<LoadInfo> getLoadInfo(const MachineInstr &MI) {
int DestRegIdx;
int BaseRegIdx;
int OffsetIdx;
return LI;
}
-static Optional<unsigned> getTag(const TargetRegisterInfo *TRI,
- const MachineInstr &MI, const LoadInfo &LI) {
+static std::optional<unsigned> getTag(const TargetRegisterInfo *TRI,
+ const MachineInstr &MI,
+ const LoadInfo &LI) {
unsigned Dest = LI.DestReg ? TRI->getEncodingValue(LI.DestReg) : 0;
unsigned Base = TRI->getEncodingValue(LI.BaseReg);
unsigned Off;
TagMap.clear();
for (MachineBasicBlock *MBB : L.getBlocks())
for (MachineInstr &MI : *MBB) {
- Optional<LoadInfo> LInfo = getLoadInfo(MI);
+ std::optional<LoadInfo> LInfo = getLoadInfo(MI);
if (!LInfo)
continue;
- Optional<unsigned> Tag = getTag(TRI, MI, *LInfo);
+ std::optional<unsigned> Tag = getTag(TRI, MI, *LInfo);
if (!Tag)
continue;
TagMap[*Tag].push_back(&MI);
if (!TII->isStridedAccess(MI))
continue;
- Optional<LoadInfo> OptLdI = getLoadInfo(MI);
+ std::optional<LoadInfo> OptLdI = getLoadInfo(MI);
if (!OptLdI)
continue;
LoadInfo LdI = *OptLdI;
- Optional<unsigned> OptOldTag = getTag(TRI, MI, LdI);
+ std::optional<unsigned> OptOldTag = getTag(TRI, MI, LdI);
if (!OptOldTag)
continue;
auto &OldCollisions = TagMap[*OptOldTag];
// Set tagged base pointer to the requested stack slot.
// Ideally it should match SP value after prologue.
- Optional<int> TBPI = AFI->getTaggedBasePointerIndex();
+ std::optional<int> TBPI = AFI->getTaggedBasePointerIndex();
if (TBPI)
AFI->setTaggedBasePointerOffset(-MFI.getObjectOffset(*TBPI));
else
// and save one instruction when generating the base pointer because IRG does
// not allow an immediate offset.
const AArch64FunctionInfo &AFI = *MF.getInfo<AArch64FunctionInfo>();
- Optional<int> TBPI = AFI.getTaggedBasePointerIndex();
+ std::optional<int> TBPI = AFI.getTaggedBasePointerIndex();
if (TBPI) {
FrameObjects[*TBPI].ObjectFirst = true;
FrameObjects[*TBPI].GroupFirst = true;
// Returns lane if Op extracts from a two-element vector and lane is constant
// (i.e., extractelt(<2 x Ty> %v, ConstantLane)), and std::nullopt otherwise.
-static Optional<uint64_t> getConstantLaneNumOfExtractHalfOperand(SDValue &Op) {
+static std::optional<uint64_t>
+getConstantLaneNumOfExtractHalfOperand(SDValue &Op) {
SDNode *OpNode = Op.getNode();
if (OpNode->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
return std::nullopt;
Mask);
}
-static Optional<SMEAttrs> getCalleeAttrsFromExternalFunction(SDValue V) {
+static std::optional<SMEAttrs> getCalleeAttrsFromExternalFunction(SDValue V) {
if (auto *ES = dyn_cast<ExternalSymbolSDNode>(V)) {
StringRef S(ES->getSymbol());
if (S == "__arm_sme_state" || S == "__arm_tpidr2_save")
SDValue LHS = Op.getOperand(1);
SDValue RHS = Op.getOperand(2);
- Optional<uint64_t> LHSLane = getConstantLaneNumOfExtractHalfOperand(LHS);
- Optional<uint64_t> RHSLane = getConstantLaneNumOfExtractHalfOperand(RHS);
+ std::optional<uint64_t> LHSLane =
+ getConstantLaneNumOfExtractHalfOperand(LHS);
+ std::optional<uint64_t> RHSLane =
+ getConstantLaneNumOfExtractHalfOperand(RHS);
assert((!LHSLane || *LHSLane < 2) && "Expect lane to be None or 0 or 1");
assert((!RHSLane || *RHSLane < 2) && "Expect lane to be None or 0 or 1");
// instructions execute on SIMD registers. So canonicalize i64 to v1i64,
// which ISel recognizes better. For example, generate a ldr into d*
// registers as opposed to a GPR load followed by a fmov.
- auto TryVectorizeOperand =
- [](SDValue N, Optional<uint64_t> NLane, Optional<uint64_t> OtherLane,
- const SDLoc &dl, SelectionDAG &DAG) -> SDValue {
+ auto TryVectorizeOperand = [](SDValue N, std::optional<uint64_t> NLane,
+ std::optional<uint64_t> OtherLane,
+ const SDLoc &dl,
+ SelectionDAG &DAG) -> SDValue {
// If the operand is an higher half itself, rewrite it to
// extract_high_v2i64; this way aarch64_neon_pmull64 could
// re-use the dag-combiner function with aarch64_neon_{pmull,smull,umull}.
unsigned ElementSize = 128 / Op.getValueType().getVectorMinNumElements();
unsigned NumActiveElems =
Op.getConstantOperandVal(2) - Op.getConstantOperandVal(1);
- Optional<unsigned> PredPattern =
+ std::optional<unsigned> PredPattern =
getSVEPredPatternFromNumElements(NumActiveElems);
if ((PredPattern != std::nullopt) &&
NumActiveElems <= (MinSVEVectorSize / ElementSize))
SMEAttrs CalleeAttrs, CallerAttrs(MF.getFunction());
if (CLI.CB)
CalleeAttrs = SMEAttrs(*CLI.CB);
- else if (Optional<SMEAttrs> Attrs =
+ else if (std::optional<SMEAttrs> Attrs =
getCalleeAttrsFromExternalFunction(CLI.Callee))
CalleeAttrs = *Attrs;
}
SDValue PStateSM;
- Optional<bool> RequiresSMChange = CallerAttrs.requiresSMChange(CalleeAttrs);
+ std::optional<bool> RequiresSMChange =
+ CallerAttrs.requiresSMChange(CalleeAttrs);
if (RequiresSMChange)
PStateSM = getPStateSM(DAG, Chain, CallerAttrs, DL, MVT::i64);
// splice predicate. However, we can only do this if we can guarantee that
// there are enough elements in the vector, hence we check the index <= min
// number of elements.
- Optional<unsigned> PredPattern;
+ std::optional<unsigned> PredPattern;
if (Ty.isScalableVector() && IdxVal < 0 &&
(PredPattern = getSVEPredPatternFromNumElements(std::abs(IdxVal))) !=
std::nullopt) {
if (Vec0.isUndef())
return Op;
- Optional<unsigned> PredPattern =
+ std::optional<unsigned> PredPattern =
getSVEPredPatternFromNumElements(InVT.getVectorNumElements());
auto PredTy = VT.changeVectorElementType(MVT::i1);
SDValue PTrue = getPTrue(DAG, DL, PredTy, *PredPattern);
Value *PTrue = nullptr;
if (UseScalable) {
- Optional<unsigned> PgPattern =
+ std::optional<unsigned> PgPattern =
getSVEPredPatternFromNumElements(FVTy->getNumElements());
if (Subtarget->getMinSVEVectorSizeInBits() ==
Subtarget->getMaxSVEVectorSizeInBits() &&
Value *PTrue = nullptr;
if (UseScalable) {
- Optional<unsigned> PgPattern =
+ std::optional<unsigned> PgPattern =
getSVEPredPatternFromNumElements(SubVecTy->getNumElements());
if (Subtarget->getMinSVEVectorSizeInBits() ==
Subtarget->getMaxSVEVectorSizeInBits() &&
DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
"Expected legal fixed length vector!");
- Optional<unsigned> PgPattern =
+ std::optional<unsigned> PgPattern =
getSVEPredPatternFromNumElements(VT.getVectorNumElements());
assert(PgPattern && "Unexpected element count for SVE predicate");
/// \returns std::nullopt otherwise.
///
/// Collect instructions using that flags in \p CCUseInstrs if provided.
-Optional<UsedNZCV>
+std::optional<UsedNZCV>
llvm::examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr,
const TargetRegisterInfo &TRI,
SmallVectorImpl<MachineInstr *> *CCUseInstrs) {
if (!isADDSRegImm(CmpOpcode) && !isSUBSRegImm(CmpOpcode))
return false;
- Optional<UsedNZCV> NZVCUsed = examineCFlagsUse(MI, CmpInstr, TRI);
+ std::optional<UsedNZCV> NZVCUsed = examineCFlagsUse(MI, CmpInstr, TRI);
if (!NZVCUsed || NZVCUsed->C || NZVCUsed->V)
return false;
if (MIUsedNZCV.C || MIUsedNZCV.V)
return false;
- Optional<UsedNZCV> NZCVUsedAfterCmp =
+ std::optional<UsedNZCV> NZCVUsedAfterCmp =
examineCFlagsUse(MI, CmpInstr, TRI, &CCUseInstrs);
// Condition flags are not used in CmpInstr basic block successors and only
// Z or N flags allowed to be used after CmpInstr within its basic block
}
}
-Optional<unsigned> AArch64InstrInfo::getUnscaledLdSt(unsigned Opc) {
+std::optional<unsigned> AArch64InstrInfo::getUnscaledLdSt(unsigned Opc) {
switch (Opc) {
default: return {};
case AArch64::PRFMui: return AArch64::PRFUMi;
return true;
}
-Optional<ExtAddrMode>
+std::optional<ExtAddrMode>
AArch64InstrInfo::getAddrModeFromMemoryOp(const MachineInstr &MemI,
const TargetRegisterInfo *TRI) const {
const MachineOperand *Base; // Filled with the base operand of MI.
// If the offset doesn't match the scale, we rewrite the instruction to
// use the unscaled instruction instead. Likewise, if we have a negative
// offset and there is an unscaled op to use.
- Optional<unsigned> UnscaledOp =
+ std::optional<unsigned> UnscaledOp =
AArch64InstrInfo::getUnscaledLdSt(MI.getOpcode());
bool useUnscaledOp = UnscaledOp && (Offset % Scale || Offset < 0);
if (useUnscaledOp &&
return MF.getFunction().hasMinSize();
}
-Optional<DestSourcePair>
+std::optional<DestSourcePair>
AArch64InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
// AArch64::ORRWrs and AArch64::ORRXrs with WZR/XZR reg
return std::nullopt;
}
-Optional<RegImmPair> AArch64InstrInfo::isAddImmediate(const MachineInstr &MI,
- Register Reg) const {
+std::optional<RegImmPair>
+AArch64InstrInfo::isAddImmediate(const MachineInstr &MI, Register Reg) const {
int Sign = 1;
int64_t Offset = 0;
/// If the given ORR instruction is a copy, and \p DescribedReg overlaps with
/// the destination register then, if possible, describe the value in terms of
/// the source register.
-static Optional<ParamLoadedValue>
+static std::optional<ParamLoadedValue>
describeORRLoadedValue(const MachineInstr &MI, Register DescribedReg,
const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) {
return std::nullopt;
}
-Optional<ParamLoadedValue>
+std::optional<ParamLoadedValue>
AArch64InstrInfo::describeLoadedValue(const MachineInstr &MI,
Register Reg) const {
const MachineFunction *MF = MI.getMF();
/// Returns the unscaled load/store for the scaled load/store opcode,
/// if there is a corresponding unscaled variant available.
- static Optional<unsigned> getUnscaledLdSt(unsigned Opc);
+ static std::optional<unsigned> getUnscaledLdSt(unsigned Opc);
/// Scaling factor for (scaled or unscaled) load or store.
static int getMemScale(unsigned Opc);
/// Hint that pairing the given load or store is unprofitable.
static void suppressLdStPair(MachineInstr &MI);
- Optional<ExtAddrMode>
+ std::optional<ExtAddrMode>
getAddrModeFromMemoryOp(const MachineInstr &MemI,
const TargetRegisterInfo *TRI) const override;
/// on Windows.
static bool isSEHInstruction(const MachineInstr &MI);
- Optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
- Register Reg) const override;
+ std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
+ Register Reg) const override;
- Optional<ParamLoadedValue> describeLoadedValue(const MachineInstr &MI,
- Register Reg) const override;
+ std::optional<ParamLoadedValue>
+ describeLoadedValue(const MachineInstr &MI, Register Reg) const override;
unsigned int getTailDuplicateSize(CodeGenOpt::Level OptLevel) const override;
/// If the specific machine instruction is an instruction that moves/copies
/// value from one register to another register return destination and source
/// registers as machine operands.
- Optional<DestSourcePair>
+ std::optional<DestSourcePair>
isCopyInstrImpl(const MachineInstr &MI) const override;
private:
/// \returns std::nullopt otherwise.
///
/// Collect instructions using that flags in \p CCUseInstrs if provided.
-Optional<UsedNZCV>
+std::optional<UsedNZCV>
examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr,
const TargetRegisterInfo &TRI,
SmallVectorImpl<MachineInstr *> *CCUseInstrs = nullptr);
  // If not std::nullopt, RenameReg can be used to rename the result register of the
// first store in a pair. Currently this only works when merging stores
// forward.
- Optional<MCPhysReg> RenameReg = std::nullopt;
+ std::optional<MCPhysReg> RenameReg = std::nullopt;
LdStPairFlags() = default;
void setRenameReg(MCPhysReg R) { RenameReg = R; }
void clearRenameReg() { RenameReg = std::nullopt; }
- Optional<MCPhysReg> getRenameReg() const { return RenameReg; }
+ std::optional<MCPhysReg> getRenameReg() const { return RenameReg; }
};
struct AArch64LoadStoreOpt : public MachineFunctionPass {
bool MergeForward = Flags.getMergeForward();
- Optional<MCPhysReg> RenameReg = Flags.getRenameReg();
+ std::optional<MCPhysReg> RenameReg = Flags.getRenameReg();
if (MergeForward && RenameReg) {
MCRegister RegToRename = getLdStRegOp(*I).getReg();
DefinedInBB.addReg(*RenameReg);
// * not used in \p UsedInBetween; UsedInBetween must contain all accessed
// registers in the range the rename register will be used,
// * is available in all used register classes (checked using RequiredClasses).
-static Optional<MCPhysReg> tryToFindRegisterToRename(
+static std::optional<MCPhysReg> tryToFindRegisterToRename(
const MachineFunction &MF, Register Reg, LiveRegUnits &DefinedInBB,
LiveRegUnits &UsedInBetween,
SmallPtrSetImpl<const TargetRegisterClass *> &RequiredClasses,
RequiredClasses, TRI)};
if (*MaybeCanRename) {
- Optional<MCPhysReg> MaybeRenameReg = tryToFindRegisterToRename(
- *FirstMI.getParent()->getParent(), Reg, DefinedInBB,
- UsedInBetween, RequiredClasses, TRI);
+ std::optional<MCPhysReg> MaybeRenameReg =
+ tryToFindRegisterToRename(*FirstMI.getParent()->getParent(),
+ Reg, DefinedInBB, UsedInBetween,
+ RequiredClasses, TRI);
if (MaybeRenameReg) {
Flags.setRenameReg(*MaybeRenameReg);
Flags.setMergeForward(true);
using OpcodePair = std::pair<unsigned, unsigned>;
template <typename T>
using SplitAndOpcFunc =
- std::function<Optional<OpcodePair>(T, unsigned, T &, T &)>;
+ std::function<std::optional<OpcodePair>(T, unsigned, T &, T &)>;
using BuildMIFunc =
std::function<void(MachineInstr &, OpcodePair, unsigned, unsigned,
Register, Register, Register)>;
return splitTwoPartImm<T>(
MI,
- [Opc](T Imm, unsigned RegSize, T &Imm0, T &Imm1) -> Optional<OpcodePair> {
+ [Opc](T Imm, unsigned RegSize, T &Imm0,
+ T &Imm1) -> std::optional<OpcodePair> {
if (splitBitmaskImm(Imm, RegSize, Imm0, Imm1))
return std::make_pair(Opc, Opc);
return std::nullopt;
return splitTwoPartImm<T>(
MI,
[PosOpc, NegOpc](T Imm, unsigned RegSize, T &Imm0,
- T &Imm1) -> Optional<OpcodePair> {
+ T &Imm1) -> std::optional<OpcodePair> {
if (splitAddSubImm(Imm, RegSize, Imm0, Imm1))
return std::make_pair(PosOpc, PosOpc);
if (splitAddSubImm(-Imm, RegSize, Imm0, Imm1))
// that the condition code usages are only for Equal and Not Equal
return splitTwoPartImm<T>(
MI,
- [PosOpcs, NegOpcs, &MI, &TRI = TRI, &MRI = MRI](
- T Imm, unsigned RegSize, T &Imm0, T &Imm1) -> Optional<OpcodePair> {
+ [PosOpcs, NegOpcs, &MI, &TRI = TRI,
+ &MRI = MRI](T Imm, unsigned RegSize, T &Imm0,
+ T &Imm1) -> std::optional<OpcodePair> {
OpcodePair OP;
if (splitAddSubImm(Imm, RegSize, Imm0, Imm1))
OP = PosOpcs;
// Check conditional uses last since it is expensive for scanning
// proceeding instructions
MachineInstr &SrcMI = *MRI->getUniqueVRegDef(MI.getOperand(1).getReg());
- Optional<UsedNZCV> NZCVUsed = examineCFlagsUse(SrcMI, MI, *TRI);
+ std::optional<UsedNZCV> NZCVUsed = examineCFlagsUse(SrcMI, MI, *TRI);
if (!NZCVUsed || NZCVUsed->C || NZCVUsed->V)
return std::nullopt;
return OP;
SmallVector<ForwardedRegister, 1> ForwardedMustTailRegParms;
/// FrameIndex for the tagged base pointer.
- Optional<int> TaggedBasePointerIndex;
+ std::optional<int> TaggedBasePointerIndex;
/// Offset from SP-at-entry to the tagged base pointer.
/// Tagged base pointer is set up to point to the first (lowest address)
/// OutliningStyle denotes, if a function was outined, how it was outlined,
/// e.g. Tail Call, Thunk, or Function if none apply.
- Optional<std::string> OutliningStyle;
+ std::optional<std::string> OutliningStyle;
// Offset from SP-after-callee-saved-spills (i.e. SP-at-entry minus
// CalleeSavedStackSize) to the address of the frame record.
/// True if the function need unwind information.
- mutable Optional<bool> NeedsDwarfUnwindInfo;
+ mutable std::optional<bool> NeedsDwarfUnwindInfo;
/// True if the function need asynchronous unwind information.
- mutable Optional<bool> NeedsAsyncDwarfUnwindInfo;
+ mutable std::optional<bool> NeedsAsyncDwarfUnwindInfo;
public:
explicit AArch64FunctionInfo(MachineFunction &MF);
uint64_t getLocalStackSize() const { return LocalStackSize; }
void setOutliningStyle(std::string Style) { OutliningStyle = Style; }
- Optional<std::string> getOutliningStyle() const { return OutliningStyle; }
+ std::optional<std::string> getOutliningStyle() const {
+ return OutliningStyle;
+ }
void setCalleeSavedStackSize(unsigned Size) {
CalleeSavedStackSize = Size;
return ForwardedMustTailRegParms;
}
- Optional<int> getTaggedBasePointerIndex() const {
+ std::optional<int> getTaggedBasePointerIndex() const {
return TaggedBasePointerIndex;
}
void setTaggedBasePointerIndex(int Index) { TaggedBasePointerIndex = Index; }
return CSR_AArch64_StackProbe_Windows_RegMask;
}
-llvm::Optional<std::string>
+std::optional<std::string>
AArch64RegisterInfo::explainReservedReg(const MachineFunction &MF,
MCRegister PhysReg) const {
if (hasBasePointer(MF) && MCRegisterInfo::regsOverlap(PhysReg, AArch64::X19))
BitVector getStrictlyReservedRegs(const MachineFunction &MF) const;
BitVector getReservedRegs(const MachineFunction &MF) const override;
- llvm::Optional<std::string>
+ std::optional<std::string>
explainReservedReg(const MachineFunction &MF,
MCRegister PhysReg) const override;
bool isAsmClobberable(const MachineFunction &MF,
bool mayUseUncheckedLoadStore();
void uncheckUsesOf(unsigned TaggedReg, int FI);
void uncheckLoadsAndStores();
- Optional<int> findFirstSlotCandidate();
+ std::optional<int> findFirstSlotCandidate();
bool runOnMachineFunction(MachineFunction &Func) override;
StringRef getPassName() const override {
// eliminates a vreg (by replacing it with direct uses of IRG, which is usually
// live almost everywhere anyway), and therefore needs to happen before
// regalloc.
-Optional<int> AArch64StackTaggingPreRA::findFirstSlotCandidate() {
+std::optional<int> AArch64StackTaggingPreRA::findFirstSlotCandidate() {
// Find the best (FI, Tag) pair to pin to offset 0.
// Looking at the possible uses of a tagged address, the advantage of pinning
// is:
// Find a slot that is used with zero tag offset, like ADDG #fi, 0.
// If the base tagged pointer is set up to the address of this slot,
// the ADDG instruction can be eliminated.
- Optional<int> BaseSlot = findFirstSlotCandidate();
+ std::optional<int> BaseSlot = findFirstSlotCandidate();
if (BaseSlot)
AFI->setTaggedBasePointerIndex(*BaseSlot);
/// is a valid vector kind. Where the number of elements in a vector
/// or the vector width is implicit or explicitly unknown (but still a
/// valid suffix kind), 0 is used.
-static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
- RegKind VectorKind) {
+static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
+ RegKind VectorKind) {
std::pair<int, int> Res = {-1, -1};
switch (VectorKind) {
if (Res == std::make_pair(-1, -1))
return std::nullopt;
- return Optional<std::pair<int, int>>(Res);
+ return std::optional<std::pair<int, int>>(Res);
}
static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
auto LookupByName = [](StringRef N) {
if (IsSVEPrefetch) {
if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
- return Optional<unsigned>(Res->Encoding);
+ return std::optional<unsigned>(Res->Encoding);
} else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
- return Optional<unsigned>(Res->Encoding);
- return Optional<unsigned>();
+ return std::optional<unsigned>(Res->Encoding);
+ return std::optional<unsigned>();
};
auto LookupByEncoding = [](unsigned E) {
if (IsSVEPrefetch) {
if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
- return Optional<StringRef>(Res->Name);
+ return std::optional<StringRef>(Res->Name);
} else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
- return Optional<StringRef>(Res->Name);
- return Optional<StringRef>();
+ return std::optional<StringRef>(Res->Name);
+ return std::optional<StringRef>();
};
unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
return MatchOperand_NoMatch;
StringRef Tail = Name.drop_front(DotPosition);
- const Optional<std::pair<int, int>> &KindRes =
+ const std::optional<std::pair<int, int>> &KindRes =
parseVectorKind(Tail, RegKind::Matrix);
if (!KindRes) {
TokError("Expected the register to be followed by element width suffix");
}
/// Return specific VL predicate pattern based on the number of elements.
-inline Optional<unsigned>
+inline std::optional<unsigned>
getSVEPredPatternFromNumElements(unsigned MinNumElts) {
switch (MinNumElts) {
default:
}
/// Return numeric key ID for 2-letter identifier string.
-inline static Optional<AArch64PACKey::ID>
+inline static std::optional<AArch64PACKey::ID>
AArch64StringToPACKeyID(StringRef Name) {
if (Name == "ia")
return AArch64PACKey::IA;
Bitmask |= ZA_Preserved;
}
-Optional<bool> SMEAttrs::requiresSMChange(const SMEAttrs &Callee,
- bool BodyOverridesInterface) const {
+std::optional<bool>
+SMEAttrs::requiresSMChange(const SMEAttrs &Callee,
+ bool BodyOverridesInterface) const {
// If the transition is not through a call (e.g. when considering inlining)
// and Callee has a streaming body, then we can ignore the interface of
// Callee.
if (BodyOverridesInterface && Callee.hasStreamingBody()) {
- return hasStreamingInterfaceOrBody() ? std::nullopt : Optional<bool>(true);
+ return hasStreamingInterfaceOrBody() ? std::nullopt
+ : std::optional<bool>(true);
}
if (Callee.hasStreamingCompatibleInterface())
/// interface. This can be useful when considering e.g. inlining, where we
/// explicitly want the body to overrule the interface (because after inlining
/// the interface is no longer relevant).
- Optional<bool> requiresSMChange(const SMEAttrs &Callee,
- bool BodyOverridesInterface = false) const;
+ std::optional<bool>
+ requiresSMChange(const SMEAttrs &Callee,
+ bool BodyOverridesInterface = false) const;
// Interfaces to query PSTATE.ZA
bool hasNewZAInterface() const { return Bitmask & ZA_New; }
Mov->addRegisterKilled(SrcReg, TRI);
}
-Optional<DestSourcePair>
+std::optional<DestSourcePair>
ARMBaseInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
// VMOVRRD is also a copy instruction but it requires
// special way of handling. It is more complex copy version
return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
}
-Optional<ParamLoadedValue>
+std::optional<ParamLoadedValue>
ARMBaseInstrInfo::describeLoadedValue(const MachineInstr &MI,
Register Reg) const {
if (auto DstSrcPair = isCopyInstrImpl(MI)) {
return makeArrayRef(TargetFlags);
}
-Optional<RegImmPair> ARMBaseInstrInfo::isAddImmediate(const MachineInstr &MI,
- Register Reg) const {
+std::optional<RegImmPair>
+ARMBaseInstrInfo::isAddImmediate(const MachineInstr &MI, Register Reg) const {
int Sign = 1;
unsigned Opcode = MI.getOpcode();
int64_t Offset = 0;
return true;
}
- Optional<bool> createTripCountGreaterCondition(
+ std::optional<bool> createTripCountGreaterCondition(
int TC, MachineBasicBlock &MBB,
SmallVectorImpl<MachineOperand> &Cond) override {
/// If the specific machine instruction is an instruction that moves/copies
/// value from one register to another register return destination and source
/// registers as machine operands.
- Optional<DestSourcePair>
+ std::optional<DestSourcePair>
isCopyInstrImpl(const MachineInstr &MI) const override;
/// Specialization of \ref TargetInstrInfo::describeLoadedValue, used to
/// enhance debug entry value descriptions for ARM targets.
- Optional<ParamLoadedValue> describeLoadedValue(const MachineInstr &MI,
- Register Reg) const override;
+ std::optional<ParamLoadedValue>
+ describeLoadedValue(const MachineInstr &MI, Register Reg) const override;
public:
// Return whether the target has an explicit NOP encoding.
return MI.getOperand(3).getReg();
}
- Optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
- Register Reg) const override;
+ std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
+ Register Reg) const override;
};
/// Get the operands corresponding to the given \p Pred value. By default, the
int computeScale(unsigned GEPElemSize, unsigned MemoryElemSize);
// If the value is a constant, or derived from constants via additions
// and multilications, return its numeric value
- Optional<int64_t> getIfConst(const Value *V);
+ std::optional<int64_t> getIfConst(const Value *V);
// If Inst is an add instruction, check whether one summand is a
// constant. If so, scale this constant and return it together with
// the other summand.
return -1;
}
-Optional<int64_t> MVEGatherScatterLowering::getIfConst(const Value *V) {
+std::optional<int64_t> MVEGatherScatterLowering::getIfConst(const Value *V) {
const Constant *C = dyn_cast<Constant>(V);
if (C && C->getSplatValue())
- return Optional<int64_t>{C->getUniqueInteger().getSExtValue()};
+ return std::optional<int64_t>{C->getUniqueInteger().getSExtValue()};
if (!isa<Instruction>(V))
- return Optional<int64_t>{};
+ return std::nullopt;
const Instruction *I = cast<Instruction>(V);
if (I->getOpcode() == Instruction::Add || I->getOpcode() == Instruction::Or ||
I->getOpcode() == Instruction::Mul ||
I->getOpcode() == Instruction::Shl) {
- Optional<int64_t> Op0 = getIfConst(I->getOperand(0));
- Optional<int64_t> Op1 = getIfConst(I->getOperand(1));
+ std::optional<int64_t> Op0 = getIfConst(I->getOperand(0));
+ std::optional<int64_t> Op1 = getIfConst(I->getOperand(1));
if (!Op0 || !Op1)
- return Optional<int64_t>{};
+ return std::nullopt;
if (I->getOpcode() == Instruction::Add)
- return Optional<int64_t>{Op0.value() + Op1.value()};
+ return std::optional<int64_t>{Op0.value() + Op1.value()};
if (I->getOpcode() == Instruction::Mul)
- return Optional<int64_t>{Op0.value() * Op1.value()};
+ return std::optional<int64_t>{Op0.value() * Op1.value()};
if (I->getOpcode() == Instruction::Shl)
- return Optional<int64_t>{Op0.value() << Op1.value()};
+ return std::optional<int64_t>{Op0.value() << Op1.value()};
if (I->getOpcode() == Instruction::Or)
- return Optional<int64_t>{Op0.value() | Op1.value()};
+ return std::optional<int64_t>{Op0.value() | Op1.value()};
}
- return Optional<int64_t>{};
+ return std::nullopt;
}
// Return true if I is an Or instruction that is equivalent to an add, due to
return ReturnFalse;
Value *Summand;
- Optional<int64_t> Const;
+ std::optional<int64_t> Const;
// Find out which operand the value that is increased is
if ((Const = getIfConst(Add->getOperand(0))))
Summand = Add->getOperand(1);
public:
struct ExtendedProperties {
- llvm::Optional<ComponentType> ElementType;
+ std::optional<ComponentType> ElementType;
// The value ordering of this enumeration is part of the DXIL ABI. Elements
// can only be added to the end, and not removed.
return MI == EndLoop;
}
- Optional<bool>
- createTripCountGreaterCondition(int TC, MachineBasicBlock &MBB,
- SmallVectorImpl<MachineOperand> &Cond) override {
+ std::optional<bool> createTripCountGreaterCondition(
+ int TC, MachineBasicBlock &MBB,
+ SmallVectorImpl<MachineOperand> &Cond) override {
if (TripCount == -1) {
// Check if we're done with the loop.
Register Done = TII->createVR(MF, MVT::i1);
const MachineFrameInfo &MFI = MF.getFrameInfo();
M68kMachineFunctionInfo *MMFI = MF.getInfo<M68kMachineFunctionInfo>();
MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
- Optional<unsigned> RetOpcode;
+ std::optional<unsigned> RetOpcode;
if (MBBI != MBB.end())
RetOpcode = MBBI->getOpcode();
DebugLoc DL;
return MCOperand::createExpr(Expr);
}
-Optional<MCOperand>
+std::optional<MCOperand>
M68kMCInstLower::LowerOperand(const MachineInstr *MI,
const MachineOperand &MO) const {
switch (MO.getType()) {
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
- Optional<MCOperand> MCOp = LowerOperand(MI, MO);
+ std::optional<MCOperand> MCOp = LowerOperand(MI, MO);
if (MCOp.has_value() && MCOp.value().isValid())
OutMI.addOperand(MCOp.value());
MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;
- Optional<MCOperand> LowerOperand(const MachineInstr *MI,
- const MachineOperand &MO) const;
+ std::optional<MCOperand> LowerOperand(const MachineInstr *MI,
+ const MachineOperand &MO) const;
void Lower(const MachineInstr *MI, MCInst &OutMI) const;
};
MIB.addReg(SrcReg, getKillRegState(KillSrc));
}
-Optional<DestSourcePair>
+std::optional<DestSourcePair>
Mips16InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
if (MI.isMoveReg())
return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
/// If the specific machine instruction is a instruction that moves/copies
/// value from one register to another register return destination and source
/// registers as machine operands.
- Optional<DestSourcePair> isCopyInstrImpl(const MachineInstr &MI) const override;
+ std::optional<DestSourcePair>
+ isCopyInstrImpl(const MachineInstr &MI) const override;
private:
unsigned getAnalyzableBrOpc(unsigned Opc) const override;
return makeArrayRef(Flags);
}
-Optional<ParamLoadedValue>
+std::optional<ParamLoadedValue>
MipsInstrInfo::describeLoadedValue(const MachineInstr &MI, Register Reg) const {
DIExpression *Expr =
DIExpression::get(MI.getMF()->getFunction().getContext(), {});
return TargetInstrInfo::describeLoadedValue(MI, Reg);
}
-Optional<RegImmPair> MipsInstrInfo::isAddImmediate(const MachineInstr &MI,
- Register Reg) const {
+std::optional<RegImmPair> MipsInstrInfo::isAddImmediate(const MachineInstr &MI,
+ Register Reg) const {
// TODO: Handle cases where Reg is a super- or sub-register of the
// destination register.
const MachineOperand &Op0 = MI.getOperand(0);
ArrayRef<std::pair<unsigned, const char *>>
getSerializableDirectMachineOperandTargetFlags() const override;
- Optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
- Register Reg) const override;
+ std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
+ Register Reg) const override;
- Optional<ParamLoadedValue> describeLoadedValue(const MachineInstr &MI,
- Register Reg) const override;
+ std::optional<ParamLoadedValue>
+ describeLoadedValue(const MachineInstr &MI, Register Reg) const override;
protected:
bool isZeroImm(const MachineOperand &op) const;
/// We check for the common case of 'or', as it's MIPS' preferred instruction
/// for GPRs but we have to check the operands to ensure that is the case.
/// Other move instructions for MIPS are directly identifiable.
-Optional<DestSourcePair>
+std::optional<DestSourcePair>
MipsSEInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
bool isDSPControlWrite = false;
// Condition is made to match the creation of WRDSP/RDDSP copy instruction
/// If the specific machine instruction is a instruction that moves/copies
/// value from one register to another register return destination and source
/// registers as machine operands.
- Optional<DestSourcePair>
+ std::optional<DestSourcePair>
isCopyInstrImpl(const MachineInstr &MI) const override;
private:
}
protected:
- llvm::Optional<MipsABIInfo> ABI;
+ std::optional<MipsABIInfo> ABI;
MipsABIFlagsSection ABIFlagsSection;
bool GPRInfoSet;
// Helper function template to reduce amount of boilerplate code for
// opcode selection.
-static Optional<unsigned> pickOpcodeForVT(
- MVT::SimpleValueType VT, unsigned Opcode_i8, unsigned Opcode_i16,
- unsigned Opcode_i32, Optional<unsigned> Opcode_i64, unsigned Opcode_f16,
- unsigned Opcode_f16x2, unsigned Opcode_f32, Optional<unsigned> Opcode_f64) {
+static std::optional<unsigned>
+pickOpcodeForVT(MVT::SimpleValueType VT, unsigned Opcode_i8,
+ unsigned Opcode_i16, unsigned Opcode_i32,
+ std::optional<unsigned> Opcode_i64, unsigned Opcode_f16,
+ unsigned Opcode_f16x2, unsigned Opcode_f32,
+ std::optional<unsigned> Opcode_f64) {
switch (VT) {
case MVT::i1:
case MVT::i8:
SDValue N1 = N->getOperand(1);
SDValue Addr;
SDValue Offset, Base;
- Optional<unsigned> Opcode;
+ std::optional<unsigned> Opcode;
MVT::SimpleValueType TargetVT = LD->getSimpleValueType(0).SimpleTy;
if (SelectDirectAddr(N1, Addr)) {
SDValue Chain = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
SDValue Addr, Offset, Base;
- Optional<unsigned> Opcode;
+ std::optional<unsigned> Opcode;
SDLoc DL(N);
SDNode *LD;
MemSDNode *MemSD = cast<MemSDNode>(N);
Mem = cast<MemSDNode>(N);
}
- Optional<unsigned> Opcode;
+ std::optional<unsigned> Opcode;
SDLoc DL(N);
SDNode *LD;
SDValue Base, Offset, Addr;
SDValue BasePtr = ST->getBasePtr();
SDValue Addr;
SDValue Offset, Base;
- Optional<unsigned> Opcode;
+ std::optional<unsigned> Opcode;
MVT::SimpleValueType SourceVT =
Value.getNode()->getSimpleValueType(0).SimpleTy;
SDValue Chain = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
SDValue Addr, Offset, Base;
- Optional<unsigned> Opcode;
+ std::optional<unsigned> Opcode;
SDLoc DL(N);
SDNode *ST;
EVT EltVT = Op1.getValueType();
EVT EltVT = Node->getValueType(0);
EVT MemVT = Mem->getMemoryVT();
- Optional<unsigned> Opcode;
+ std::optional<unsigned> Opcode;
switch (VecSize) {
default:
// Determine target opcode
// If we have an i1, use an 8-bit store. The lowering code in
// NVPTXISelLowering will have already emitted an upcast.
- Optional<unsigned> Opcode = 0;
+ std::optional<unsigned> Opcode = 0;
switch (NumElts) {
default:
return false;
// Determine target opcode
// If we have an i1, use an 8-bit store. The lowering code in
// NVPTXISelLowering will have already emitted an upcast.
- Optional<unsigned> Opcode = 0;
+ std::optional<unsigned> Opcode = 0;
switch (N->getOpcode()) {
default:
switch (NumElts) {
}
SmallVector<SDValue, 16> ProxyRegOps;
- SmallVector<Optional<MVT>, 16> ProxyRegTruncates;
+ SmallVector<std::optional<MVT>, 16> ProxyRegTruncates;
// Generate loads from param memory/moves from registers for result
if (Ins.size() > 0) {
ProxyRegOps.push_back(RetVal.getValue(j));
if (needTruncate)
- ProxyRegTruncates.push_back(Optional<MVT>(Ins[VecIdx + j].VT));
+ ProxyRegTruncates.push_back(Ins[VecIdx + j].VT);
else
- ProxyRegTruncates.push_back(Optional<MVT>());
+ ProxyRegTruncates.push_back(std::nullopt);
}
Chain = RetVal.getValue(NumElts);
// instruction pair, return a value, otherwise return None. A true returned
// value means the instruction is the PLDpc and a false value means it is
// the user instruction.
- Optional<bool> IsPartOfGOTToPCRelPair = isPartOfGOTToPCRelPair(Inst, STI);
+ std::optional<bool> IsPartOfGOTToPCRelPair =
+ isPartOfGOTToPCRelPair(Inst, STI);
// User of the GOT-indirect address.
// For example, the load that will get the relocation as follows:
// at the opcode and in the case of PLDpc we will return true. For the load
// (or store) this function will return false indicating it has found the second
// instruciton in the pair.
-Optional<bool> llvm::isPartOfGOTToPCRelPair(const MCInst &Inst,
- const MCSubtargetInfo &STI) {
+std::optional<bool> llvm::isPartOfGOTToPCRelPair(const MCInst &Inst,
+ const MCSubtargetInfo &STI) {
// Need at least two operands.
if (Inst.getNumOperands() < 2)
return std::nullopt;
// Check if the instruction Inst is part of a pair of instructions that make up
// a link time GOT PC Rel optimization.
-Optional<bool> isPartOfGOTToPCRelPair(const MCInst &Inst,
- const MCSubtargetInfo &STI);
+std::optional<bool> isPartOfGOTToPCRelPair(const MCInst &Inst,
+ const MCSubtargetInfo &STI);
MCELFStreamer *createPPCELFStreamer(MCContext &Context,
std::unique_ptr<MCAsmBackend> MAB,
} // end anonymous namespace
-static Optional<PPC::Predicate> getComparePred(CmpInst::Predicate Pred) {
- switch (Pred) {
+static std::optional<PPC::Predicate> getComparePred(CmpInst::Predicate Pred) {
+ switch (Pred) {
// These are not representable with any single compare.
case CmpInst::FCMP_FALSE:
case CmpInst::FCMP_TRUE:
// For now, just try the simplest case where it's fed by a compare.
if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
if (isValueAvailable(CI)) {
- Optional<PPC::Predicate> OptPPCPred = getComparePred(CI->getPredicate());
+ std::optional<PPC::Predicate> OptPPCPred =
+ getComparePred(CI->getPredicate());
if (!OptPPCPred)
return false;
return MI == EndLoop;
}
- Optional<bool>
- createTripCountGreaterCondition(int TC, MachineBasicBlock &MBB,
- SmallVectorImpl<MachineOperand> &Cond) override {
+ std::optional<bool> createTripCountGreaterCondition(
+ int TC, MachineBasicBlock &MBB,
+ SmallVectorImpl<MachineOperand> &Cond) override {
if (TripCount == -1) {
// Since BDZ/BDZ8 that we will insert will also decrease the ctr by 1,
// so we don't need to generate any thing here.
/// SExt. If \p SExt is None, this returns the source of this operand.
/// \see ::getSource().
SDValue getOrCreateExtendedOp(const SDNode *Root, SelectionDAG &DAG,
- Optional<bool> SExt) const {
+ std::optional<bool> SExt) const {
if (!SExt.has_value())
return OrigOperand;
}
}
- using CombineToTry = std::function<Optional<CombineResult>(
+ using CombineToTry = std::function<std::optional<CombineResult>(
SDNode * /*Root*/, const NodeExtensionHelper & /*LHS*/,
const NodeExtensionHelper & /*RHS*/)>;
unsigned TargetOpcode;
// No value means no extension is needed. If extension is needed, the value
// indicates if it needs to be sign extended.
- Optional<bool> SExtLHS;
- Optional<bool> SExtRHS;
+ std::optional<bool> SExtLHS;
+ std::optional<bool> SExtRHS;
/// Root of the combine.
SDNode *Root;
/// LHS of the TargetOpcode.
NodeExtensionHelper RHS;
CombineResult(unsigned TargetOpcode, SDNode *Root,
- const NodeExtensionHelper &LHS, Optional<bool> SExtLHS,
- const NodeExtensionHelper &RHS, Optional<bool> SExtRHS)
+ const NodeExtensionHelper &LHS, std::optional<bool> SExtLHS,
+ const NodeExtensionHelper &RHS, std::optional<bool> SExtRHS)
: TargetOpcode(TargetOpcode), SExtLHS(SExtLHS), SExtRHS(SExtRHS),
Root(Root), LHS(LHS), RHS(RHS) {}
///
/// \returns None if the pattern doesn't match or a CombineResult that can be
/// used to apply the pattern.
-static Optional<CombineResult>
+static std::optional<CombineResult>
canFoldToVWWithSameExtensionImpl(SDNode *Root, const NodeExtensionHelper &LHS,
const NodeExtensionHelper &RHS, bool AllowSExt,
bool AllowZExt) {
///
/// \returns None if the pattern doesn't match or a CombineResult that can be
/// used to apply the pattern.
-static Optional<CombineResult>
+static std::optional<CombineResult>
canFoldToVWWithSameExtension(SDNode *Root, const NodeExtensionHelper &LHS,
const NodeExtensionHelper &RHS) {
return canFoldToVWWithSameExtensionImpl(Root, LHS, RHS, /*AllowSExt=*/true,
///
/// \returns None if the pattern doesn't match or a CombineResult that can be
/// used to apply the pattern.
-static Optional<CombineResult> canFoldToVW_W(SDNode *Root,
- const NodeExtensionHelper &LHS,
- const NodeExtensionHelper &RHS) {
+static std::optional<CombineResult>
+canFoldToVW_W(SDNode *Root, const NodeExtensionHelper &LHS,
+ const NodeExtensionHelper &RHS) {
if (!RHS.areVLAndMaskCompatible(Root))
return std::nullopt;
///
/// \returns None if the pattern doesn't match or a CombineResult that can be
/// used to apply the pattern.
-static Optional<CombineResult>
+static std::optional<CombineResult>
canFoldToVWWithSEXT(SDNode *Root, const NodeExtensionHelper &LHS,
const NodeExtensionHelper &RHS) {
return canFoldToVWWithSameExtensionImpl(Root, LHS, RHS, /*AllowSExt=*/true,
///
/// \returns None if the pattern doesn't match or a CombineResult that can be
/// used to apply the pattern.
-static Optional<CombineResult>
+static std::optional<CombineResult>
canFoldToVWWithZEXT(SDNode *Root, const NodeExtensionHelper &LHS,
const NodeExtensionHelper &RHS) {
return canFoldToVWWithSameExtensionImpl(Root, LHS, RHS, /*AllowSExt=*/false,
///
/// \returns None if the pattern doesn't match or a CombineResult that can be
/// used to apply the pattern.
-static Optional<CombineResult> canFoldToVW_SU(SDNode *Root,
- const NodeExtensionHelper &LHS,
- const NodeExtensionHelper &RHS) {
+static std::optional<CombineResult>
+canFoldToVW_SU(SDNode *Root, const NodeExtensionHelper &LHS,
+ const NodeExtensionHelper &RHS) {
if (!LHS.SupportsSExt || !RHS.SupportsZExt)
return std::nullopt;
if (!LHS.areVLAndMaskCompatible(Root) || !RHS.areVLAndMaskCompatible(Root))
for (NodeExtensionHelper::CombineToTry FoldingStrategy :
FoldingStrategies) {
- Optional<CombineResult> Res = FoldingStrategy(N, LHS, RHS);
+ std::optional<CombineResult> Res = FoldingStrategy(N, LHS, RHS);
if (Res) {
Matched = true;
CombinesToApply.push_back(*Res);
}
static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
- Optional<unsigned> FirstMaskArgument,
+ std::optional<unsigned> FirstMaskArgument,
CCState &State, const RISCVTargetLowering &TLI) {
const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
if (RC == &RISCV::VRRegClass) {
MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
- Optional<unsigned> FirstMaskArgument) {
+ std::optional<unsigned> FirstMaskArgument) {
unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
assert(XLen == 32 || XLen == 64);
MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
}
template <typename ArgTy>
-static Optional<unsigned> preAssignMask(const ArgTy &Args) {
+static std::optional<unsigned> preAssignMask(const ArgTy &Args) {
for (const auto &ArgIdx : enumerate(Args)) {
MVT ArgVT = ArgIdx.value().VT;
if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
unsigned NumArgs = Ins.size();
FunctionType *FType = MF.getFunction().getFunctionType();
- Optional<unsigned> FirstMaskArgument;
+ std::optional<unsigned> FirstMaskArgument;
if (Subtarget.hasVInstructions())
FirstMaskArgument = preAssignMask(Ins);
CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
unsigned NumArgs = Outs.size();
- Optional<unsigned> FirstMaskArgument;
+ std::optional<unsigned> FirstMaskArgument;
if (Subtarget.hasVInstructions())
FirstMaskArgument = preAssignMask(Outs);
ISD::ArgFlagsTy ArgFlags, CCState &State,
bool IsFixed, bool IsRet, Type *OrigTy,
const RISCVTargetLowering &TLI,
- Optional<unsigned> FirstMaskArgument) {
+ std::optional<unsigned> FirstMaskArgument) {
// X5 and X6 might be used for save-restore libcall.
static const MCPhysReg GPRList[] = {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
- Optional<unsigned> FirstMaskArgument;
+ std::optional<unsigned> FirstMaskArgument;
if (Subtarget.hasVInstructions())
FirstMaskArgument = preAssignMask(Outs);
ISD::ArgFlagsTy ArgFlags, CCState &State,
bool IsFixed, bool IsRet, Type *OrigTy,
const RISCVTargetLowering &TLI,
- Optional<unsigned> FirstMaskArgument);
+ std::optional<unsigned> FirstMaskArgument);
void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo,
const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
/// Get the EEW for a load or store instruction. Return None if MI is not
/// a load or store which ignores SEW.
-static Optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) {
+static std::optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) {
switch (getRVVMCOpcode(MI.getOpcode())) {
default:
return std::nullopt;
InstrInfo.setAVLReg(RISCV::NoRegister);
}
#ifndef NDEBUG
- if (Optional<unsigned> EEW = getEEWForLoadStore(MI)) {
+ if (std::optional<unsigned> EEW = getEEWForLoadStore(MI)) {
assert(SEW == EEW && "Initial SEW doesn't match expected EEW");
}
#endif
return MI.isAsCheapAsAMove();
}
-Optional<DestSourcePair>
+std::optional<DestSourcePair>
RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
if (MI.isMoveReg())
return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
return true;
}
-Optional<std::pair<unsigned, unsigned>>
+std::optional<std::pair<unsigned, unsigned>>
RISCV::isRVVSpillForZvlsseg(unsigned Opcode) {
switch (Opcode) {
default:
bool isAsCheapAsAMove(const MachineInstr &MI) const override;
- Optional<DestSourcePair>
+ std::optional<DestSourcePair>
isCopyInstrImpl(const MachineInstr &MI) const override;
bool verifyInstruction(const MachineInstr &MI,
// expect to see a FrameIndex operand.
bool isRVVSpill(const MachineInstr &MI);
-Optional<std::pair<unsigned, unsigned>> isRVVSpillForZvlsseg(unsigned Opcode);
+std::optional<std::pair<unsigned, unsigned>>
+isRVVSpillForZvlsseg(unsigned Opcode);
bool isFaultFirstLoad(const MachineInstr &MI);
/// Lowers a builtin funtion call using the provided \p DemangledCall skeleton
/// and external instruction \p Set.
namespace SPIRV {
-Optional<bool> lowerBuiltin(const StringRef DemangledCall,
- SPIRV::InstructionSet::InstructionSet Set,
- MachineIRBuilder &MIRBuilder,
- const Register OrigRet, const Type *OrigRetTy,
- const SmallVectorImpl<Register> &Args,
- SPIRVGlobalRegistry *GR) {
+std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
+ SPIRV::InstructionSet::InstructionSet Set,
+ MachineIRBuilder &MIRBuilder,
+ const Register OrigRet, const Type *OrigRetTy,
+ const SmallVectorImpl<Register> &Args,
+ SPIRVGlobalRegistry *GR) {
LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");
// SPIR-V type and return register.
/// Register(0) otherwise.
/// \p OrigRetTy is the type of the \p OrigRet.
/// \p Args are the arguments of the lowered builtin call.
-Optional<bool> lowerBuiltin(const StringRef DemangledCall,
- InstructionSet::InstructionSet Set,
- MachineIRBuilder &MIRBuilder,
- const Register OrigRet, const Type *OrigRetTy,
- const SmallVectorImpl<Register> &Args,
- SPIRVGlobalRegistry *GR);
+std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
+ InstructionSet::InstructionSet Set,
+ MachineIRBuilder &MIRBuilder,
+ const Register OrigRet, const Type *OrigRetTy,
+ const SmallVectorImpl<Register> &Args,
+ SPIRVGlobalRegistry *GR);
/// Handles the translation of the provided special opaque/builtin type \p Type
/// to SPIR-V type. Generates the corresponding machine instructions for the
/// target type or gets the already existing OpType<...> register from the
struct Requirements {
const bool IsSatisfiable;
- const Optional<Capability::Capability> Cap;
+ const std::optional<Capability::Capability> Cap;
const ExtensionList Exts;
const unsigned MinVer; // 0 if no min version is required.
const unsigned MaxVer; // 0 if no max version is required.
Requirements(bool IsSatisfiable = false,
- Optional<Capability::Capability> Cap = {},
+ std::optional<Capability::Capability> Cap = {},
ExtensionList Exts = {}, unsigned MinVer = 0,
unsigned MaxVer = 0)
: IsSatisfiable(IsSatisfiable), Cap(Cap), Exts(Exts), MinVer(MinVer),
}
/// \returns the VVP_* SDNode opcode corresponsing to \p OC.
-Optional<unsigned> getVVPOpcode(unsigned Opcode) {
+std::optional<unsigned> getVVPOpcode(unsigned Opcode) {
switch (Opcode) {
case ISD::MLOAD:
return VEISD::VVP_LOAD;
}
// Return the AVL operand position for this VVP or VEC Op.
-Optional<int> getAVLPos(unsigned Opc) {
+std::optional<int> getAVLPos(unsigned Opc) {
// This is only available for VP SDNodes
auto PosOpt = ISD::getVPExplicitVectorLengthIdx(Opc);
if (PosOpt)
return std::nullopt;
}
-Optional<int> getMaskPos(unsigned Opc) {
+std::optional<int> getMaskPos(unsigned Opc) {
// This is only available for VP SDNodes
auto PosOpt = ISD::getVPMaskIdx(Opc);
if (PosOpt)
return SDValue();
}
-Optional<EVT> getIdiomaticVectorType(SDNode *Op) {
+std::optional<EVT> getIdiomaticVectorType(SDNode *Op) {
unsigned OC = Op->getOpcode();
// For memory ops -> the transfered data type
namespace llvm {
-Optional<unsigned> getVVPOpcode(unsigned Opcode);
+std::optional<unsigned> getVVPOpcode(unsigned Opcode);
bool isVVPUnaryOp(unsigned Opcode);
bool isVVPBinaryOp(unsigned Opcode);
//
/// AVL Functions {
// The AVL operand position of this node.
-Optional<int> getAVLPos(unsigned);
+std::optional<int> getAVLPos(unsigned);
// Whether this is a LEGALAVL node.
bool isLegalAVL(SDValue AVL);
SDValue getNodeAVL(SDValue);
// Mask position of this node.
-Optional<int> getMaskPos(unsigned);
+std::optional<int> getMaskPos(unsigned);
SDValue getNodeMask(SDValue);
/// Node Properties {
-Optional<EVT> getIdiomaticVectorType(SDNode *Op);
+std::optional<EVT> getIdiomaticVectorType(SDNode *Op);
SDValue getLoadStoreStride(SDValue Op, VECustomDAG &CDAG);
/// getNode {
SDValue getNode(unsigned OC, SDVTList VTL, ArrayRef<SDValue> OpV,
- Optional<SDNodeFlags> Flags = std::nullopt) const {
+ std::optional<SDNodeFlags> Flags = std::nullopt) const {
auto N = DAG.getNode(OC, DL, VTL, OpV);
if (Flags)
N->setFlags(*Flags);
}
SDValue getNode(unsigned OC, ArrayRef<EVT> ResVT, ArrayRef<SDValue> OpV,
- Optional<SDNodeFlags> Flags = std::nullopt) const {
+ std::optional<SDNodeFlags> Flags = std::nullopt) const {
auto N = DAG.getNode(OC, DL, ResVT, OpV);
if (Flags)
N->setFlags(*Flags);
}
SDValue getNode(unsigned OC, EVT ResVT, ArrayRef<SDValue> OpV,
- Optional<SDNodeFlags> Flags = std::nullopt) const {
+ std::optional<SDNodeFlags> Flags = std::nullopt) const {
auto N = DAG.getNode(OC, DL, ResVT, OpV);
if (Flags)
N->setFlags(*Flags);
auto ElemTypeName = expectIdent();
if (ElemTypeName.empty())
return true;
- Optional<wasm::ValType> ElemType = WebAssembly::parseType(ElemTypeName);
+ std::optional<wasm::ValType> ElemType =
+ WebAssembly::parseType(ElemTypeName);
if (!ElemType)
return error("Unknown type in .tabletype directive: ", ElemTypeTok);
}
bool WebAssemblyAsmTypeCheck::popType(SMLoc ErrorLoc,
- Optional<wasm::ValType> EVT) {
+ std::optional<wasm::ValType> EVT) {
if (Stack.empty()) {
return typeError(ErrorLoc,
EVT ? StringRef("empty stack while popping ") +
void dumpTypeStack(Twine Msg);
bool typeError(SMLoc ErrorLoc, const Twine &Msg);
- bool popType(SMLoc ErrorLoc, Optional<wasm::ValType> EVT);
+ bool popType(SMLoc ErrorLoc, std::optional<wasm::ValType> EVT);
bool popRefType(SMLoc ErrorLoc);
bool getLocal(SMLoc ErrorLoc, const MCInst &Inst, wasm::ValType &Type);
bool checkEnd(SMLoc ErrorLoc, bool PopVals = false);
using namespace llvm;
-Optional<wasm::ValType> WebAssembly::parseType(StringRef Type) {
+std::optional<wasm::ValType> WebAssembly::parseType(StringRef Type) {
// FIXME: can't use StringSwitch because wasm::ValType doesn't have a
// "invalid" value.
if (Type == "i32")
// Convert StringRef to ValType / HealType / BlockType
-Optional<wasm::ValType> parseType(StringRef Type);
+std::optional<wasm::ValType> parseType(StringRef Type);
BlockType parseBlockType(StringRef Type);
MVT parseMVT(StringRef Type);
// SelectionDAGISel::runOnMachineFunction. We have to do it in two places
// because we want to do it while building the selection DAG for uses of alloca,
// but not all alloca instructions are used so we have to follow up afterwards.
-Optional<unsigned>
+std::optional<unsigned>
WebAssemblyFrameLowering::getLocalForStackObject(MachineFunction &MF,
int FrameIndex) {
MachineFrameInfo &MFI = MF.getFrameInfo();
// Returns the index of the WebAssembly local to which the stack object
// FrameIndex in MF should be allocated, or None.
- static Optional<unsigned> getLocalForStackObject(MachineFunction &MF,
- int FrameIndex);
+ static std::optional<unsigned> getLocalForStackObject(MachineFunction &MF,
+ int FrameIndex);
static unsigned getSPReg(const MachineFunction &MF);
static unsigned getFPReg(const MachineFunction &MF);
return false;
}
-static Optional<unsigned> IsWebAssemblyLocal(SDValue Op, SelectionDAG &DAG) {
+static std::optional<unsigned> IsWebAssemblyLocal(SDValue Op,
+ SelectionDAG &DAG) {
const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op);
if (!FI)
return std::nullopt;
SN->getMemoryVT(), SN->getMemOperand());
}
- if (Optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
+ if (std::optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
if (!Offset->isUndef())
report_fatal_error("unexpected offset when storing to webassembly local",
false);
LN->getMemoryVT(), LN->getMemOperand());
}
- if (Optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
+ if (std::optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
if (!Offset->isUndef())
report_fatal_error(
"unexpected offset when loading from webassembly local", false);
bool Is64BitAlloca = MI->getOpcode() == X86::DYN_ALLOCA_64;
assert(SlotSize == 4 || SlotSize == 8);
- Optional<MachineFunction::DebugInstrOperandPair> InstrNum;
+ std::optional<MachineFunction::DebugInstrOperandPair> InstrNum;
if (unsigned Num = MI->peekDebugInstrNum()) {
// Operand 2 of DYN_ALLOCAs contains the stack def.
InstrNum = {Num, 2};
void X86FrameLowering::emitStackProbe(
MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog,
- Optional<MachineFunction::DebugInstrOperandPair> InstrNum) const {
+ std::optional<MachineFunction::DebugInstrOperandPair> InstrNum) const {
const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
if (STI.isTargetWindowsCoreCLR()) {
if (InProlog) {
void X86FrameLowering::emitStackProbeCall(
MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog,
- Optional<MachineFunction::DebugInstrOperandPair> InstrNum) const {
+ std::optional<MachineFunction::DebugInstrOperandPair> InstrNum) const {
bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;
// FIXME: Add indirect thunk support and remove this.
void emitStackProbe(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
bool InProlog,
- Optional<MachineFunction::DebugInstrOperandPair>
+ std::optional<MachineFunction::DebugInstrOperandPair>
InstrNum = std::nullopt) const;
bool stackProbeFunctionModifiesSP() const override;
void emitStackProbeCall(
MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog,
- Optional<MachineFunction::DebugInstrOperandPair> InstrNum) const;
+ std::optional<MachineFunction::DebugInstrOperandPair> InstrNum) const;
/// Emit target stack probe as an inline sequence.
void emitStackProbeInline(MachineFunction &MF, MachineBasicBlock &MBB,
// If we have BMI2's BZHI, we are ok with muti-use patterns.
// Else, if we only have BMI1's BEXTR, we require one-use.
const bool AllowExtraUsesByDefault = Subtarget->hasBMI2();
- auto checkUses = [AllowExtraUsesByDefault](SDValue Op, unsigned NUses,
- Optional<bool> AllowExtraUses) {
+ auto checkUses = [AllowExtraUsesByDefault](
+ SDValue Op, unsigned NUses,
+ std::optional<bool> AllowExtraUses) {
return AllowExtraUses.value_or(AllowExtraUsesByDefault) ||
Op.getNode()->hasNUsesOfValue(NUses, Op.getResNo());
};
auto checkOneUse = [checkUses](SDValue Op,
- Optional<bool> AllowExtraUses = std::nullopt) {
+ std::optional<bool> AllowExtraUses =
+ std::nullopt) {
return checkUses(Op, 1, AllowExtraUses);
};
auto checkTwoUse = [checkUses](SDValue Op,
- Optional<bool> AllowExtraUses = std::nullopt) {
+ std::optional<bool> AllowExtraUses =
+ std::nullopt) {
return checkUses(Op, 2, AllowExtraUses);
};
report_fatal_error("Cannot emit physreg copy instruction");
}
-Optional<DestSourcePair>
+std::optional<DestSourcePair>
X86InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
if (MI.isMoveReg())
return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
}
}
-Optional<ExtAddrMode>
+std::optional<ExtAddrMode>
X86InstrInfo::getAddrModeFromMemoryOp(const MachineInstr &MemI,
const TargetRegisterInfo *TRI) const {
const MCInstrDesc &Desc = MemI.getDesc();
bool X86InstrInfo::verifyInstruction(const MachineInstr &MI,
StringRef &ErrInfo) const {
- Optional<ExtAddrMode> AMOrNone = getAddrModeFromMemoryOp(MI, nullptr);
+ std::optional<ExtAddrMode> AMOrNone = getAddrModeFromMemoryOp(MI, nullptr);
if (!AMOrNone)
return true;
/// If \p DescribedReg overlaps with the MOVrr instruction's destination
/// register then, if possible, describe the value in terms of the source
/// register.
-static Optional<ParamLoadedValue>
+static std::optional<ParamLoadedValue>
describeMOVrrLoadedValue(const MachineInstr &MI, Register DescribedReg,
const TargetRegisterInfo *TRI) {
Register DestReg = MI.getOperand(0).getReg();
return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
}
-Optional<ParamLoadedValue>
+std::optional<ParamLoadedValue>
X86InstrInfo::describeLoadedValue(const MachineInstr &MI, Register Reg) const {
const MachineOperand *Op = nullptr;
DIExpression *Expr = nullptr;
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const override;
- Optional<ExtAddrMode>
+ std::optional<ExtAddrMode>
getAddrModeFromMemoryOp(const MachineInstr &MemI,
const TargetRegisterInfo *TRI) const override;
return MI.getDesc().TSFlags & X86II::LOCK;
}
- Optional<ParamLoadedValue> describeLoadedValue(const MachineInstr &MI,
- Register Reg) const override;
+ std::optional<ParamLoadedValue>
+ describeLoadedValue(const MachineInstr &MI, Register Reg) const override;
protected:
/// Commutes the operands in the given instruction by changing the operands
/// If the specific machine instruction is a instruction that moves/copies
/// value from one register to another register return destination and source
/// registers as machine operands.
- Optional<DestSourcePair>
+ std::optional<DestSourcePair>
isCopyInstrImpl(const MachineInstr &MI) const override;
private:
public:
X86MCInstLower(const MachineFunction &MF, X86AsmPrinter &asmprinter);
- Optional<MCOperand> LowerMachineOperand(const MachineInstr *MI,
- const MachineOperand &MO) const;
+ std::optional<MCOperand> LowerMachineOperand(const MachineInstr *MI,
+ const MachineOperand &MO) const;
void Lower(const MachineInstr *MI, MCInst &OutMI) const;
MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const;
return Subtarget.is64Bit() ? X86::RET64 : X86::RET32;
}
-Optional<MCOperand>
+std::optional<MCOperand>
X86MCInstLower::LowerMachineOperand(const MachineInstr *MI,
const MachineOperand &MO) const {
switch (MO.getType()) {
/// determine if we should insert tilerelease in frame lowering.
bool HasVirtualTileReg = false;
- Optional<int> SwiftAsyncContextFrameIdx;
+ std::optional<int> SwiftAsyncContextFrameIdx;
// Preallocated fields are only used during isel.
// FIXME: Can we find somewhere else to store these?
bool hasVirtualTileReg() const { return HasVirtualTileReg; }
void setHasVirtualTileReg(bool v) { HasVirtualTileReg = v; }
- Optional<int> getSwiftAsyncContextFrameIdx() const {
+ std::optional<int> getSwiftAsyncContextFrameIdx() const {
return SwiftAsyncContextFrameIdx;
}
void setSwiftAsyncContextFrameIdx(int v) { SwiftAsyncContextFrameIdx = v; }
// See if we can sink hardening the loaded value.
auto SinkCheckToSingleUse =
- [&](MachineInstr &MI) -> Optional<MachineInstr *> {
+ [&](MachineInstr &MI) -> std::optional<MachineInstr *> {
Register DefReg = MI.getOperand(0).getReg();
// We need to find a single use which we can sink the check. We can
};
MachineInstr *MI = &InitialMI;
- while (Optional<MachineInstr *> SingleUse = SinkCheckToSingleUse(*MI)) {
+ while (std::optional<MachineInstr *> SingleUse = SinkCheckToSingleUse(*MI)) {
// Update which MI we're checking now.
MI = *SingleUse;
if (!MI)