#ifndef LLVM_MC_MCINSTRDESC_H
#define LLVM_MC_MCINSTRDESC_H
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/MC/MCRegister.h"
/// Returns the value of the constraint \p Constraint for operand \p OpNum,
/// or -1 if the operand does not carry that constraint.
///
/// Constraints are packed into the operand's 32-bit Constraints field:
/// bit `Constraint` records presence, and the associated 4-bit value lives
/// at bit position 4 + Constraint * 4 (e.g. for TIED_TO, the index of the
/// operand this one is tied to).
int getOperandConstraint(unsigned OpNum,
                         MCOI::OperandConstraint Constraint) const {
  if (OpNum < NumOperands &&
      (operands()[OpNum].Constraints & (1 << Constraint))) {
    unsigned ValuePos = 4 + Constraint * 4;
    return (int)(operands()[OpNum].Constraints >> ValuePos) & 0x0f;
  }
  return -1;
}
const_opInfo_iterator opInfo_begin() const { return OpInfo; }
const_opInfo_iterator opInfo_end() const { return OpInfo + NumOperands; }
- iterator_range<const_opInfo_iterator> operands() const {
- return make_range(opInfo_begin(), opInfo_end());
+ ArrayRef<MCOperandInfo> operands() const {
+ return ArrayRef(OpInfo, NumOperands);
}
/// Return the number of MachineOperands that are register
int findFirstPredOperandIdx() const {
if (isPredicable()) {
for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
- if (OpInfo[i].isPredicate())
+ if (operands()[i].isPredicate())
return i;
}
return -1;
const MachineRegisterInfo &MRI) const {
SmallVector<LLT, 8> Types;
SmallBitVector SeenTypes(8);
- const MCOperandInfo *OpInfo = MI.getDesc().OpInfo;
+ ArrayRef<MCOperandInfo> OpInfo = MI.getDesc().operands();
// FIXME: probably we'll need to cache the results here somehow?
for (unsigned i = 0; i < MI.getDesc().getNumOperands(); ++i) {
if (!OpInfo[i].isGenericType())
for (unsigned Opcode = FirstOp; Opcode <= LastOp; ++Opcode) {
const MCInstrDesc &MCID = MII.get(Opcode);
const unsigned NumTypeIdxs = std::accumulate(
- MCID.opInfo_begin(), MCID.opInfo_end(), 0U,
+ MCID.operands().begin(), MCID.operands().end(), 0U,
[](unsigned Acc, const MCOperandInfo &OpInfo) {
return OpInfo.isGenericType()
? std::max(OpInfo.getGenericTypeIndex() + 1U, Acc)
: Acc;
});
const unsigned NumImmIdxs = std::accumulate(
- MCID.opInfo_begin(), MCID.opInfo_end(), 0U,
+ MCID.operands().begin(), MCID.operands().end(), 0U,
[](unsigned Acc, const MCOperandInfo &OpInfo) {
return OpInfo.isGenericImm()
? std::max(OpInfo.getGenericImmIndex() + 1U, Acc)
const MCInstrDesc &MCID = getDesc();
if (MCID.isPredicable()) {
for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
- if (MCID.OpInfo[i].isPredicate())
+ if (MCID.operands()[i].isPredicate())
return i;
}
if (isVariadic() || OpIdx >= getNumExplicitOperands())
return MRI.getType(Op.getReg());
- auto &OpInfo = getDesc().OpInfo[OpIdx];
+ auto &OpInfo = getDesc().operands()[OpIdx];
if (!OpInfo.isGenericType())
return MRI.getType(Op.getReg());
SmallVector<LLT, 4> Types;
for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
I != E; ++I) {
- if (!MCID.OpInfo[I].isGenericType())
+ if (!MCID.operands()[I].isGenericType())
continue;
// Generic instructions specify type equality constraints between some of
// their operands. Make sure these are consistent.
- size_t TypeIdx = MCID.OpInfo[I].getGenericTypeIndex();
+ size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
Types.resize(std::max(TypeIdx + 1, Types.size()));
const MachineOperand *MO = &MI->getOperand(I);
// The first MCID.NumDefs operands must be explicit register defines
if (MONum < NumDefs) {
- const MCOperandInfo &MCOI = MCID.OpInfo[MONum];
+ const MCOperandInfo &MCOI = MCID.operands()[MONum];
if (!MO->isReg())
report("Explicit definition must be a register", MO, MONum);
else if (!MO->isDef() && !MCOI.isOptionalDef())
else if (MO->isImplicit())
report("Explicit definition marked as implicit", MO, MONum);
} else if (MONum < MCID.getNumOperands()) {
- const MCOperandInfo &MCOI = MCID.OpInfo[MONum];
+ const MCOperandInfo &MCOI = MCID.operands()[MONum];
// Don't check if it's the last operand in a variadic instruction. See,
// e.g., LDM_RET in the arm back end. Check non-variadic operands only.
bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
RC = VTRC;
}
- if (II.OpInfo != nullptr && II.OpInfo[i].isOptionalDef()) {
+ if (!II.operands().empty() && II.operands()[i].isOptionalDef()) {
// Optional def must be a physical register.
VRBase = cast<RegisterSDNode>(Node->getOperand(i-NumResults))->getReg();
assert(VRBase.isPhysical());
const MCInstrDesc &MCID = MIB->getDesc();
bool isOptDef = IIOpNum < MCID.getNumOperands() &&
- MCID.OpInfo[IIOpNum].isOptionalDef();
+ MCID.operands()[IIOpNum].isOptionalDef();
// If the instruction requires a register in a different class, create
// a new virtual register and copy the value into it, but first attempt to
// of %noreg. When the OptionalDef is set to a valid register, we need to
// handle it in the same way as an ImplicitDef.
for (unsigned i = 0; i < MCID.getNumDefs(); ++i)
- if (MCID.OpInfo[i].isOptionalDef()) {
+ if (MCID.operands()[i].isOptionalDef()) {
const SDValue &OptionalDef = Node->getOperand(i - Node->getNumValues());
Register Reg = cast<RegisterSDNode>(OptionalDef)->getReg();
CheckForLiveRegDef(SU, Reg, LiveRegDefs.get(), RegAdded, LRegs, TRI);
if (OpNum >= MCID.getNumOperands())
return nullptr;
- short RegClass = MCID.OpInfo[OpNum].RegClass;
- if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
+ short RegClass = MCID.operands()[OpNum].RegClass;
+ if (MCID.operands()[OpNum].isLookupPtrRegClass())
return TRI->getPointerRegClass(MF, RegClass);
// Instructions like INSERT_SUBREG do not have fixed register classes.
return false;
for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
- if (MCID.OpInfo[i].isPredicate()) {
+ if (MCID.operands()[i].isPredicate()) {
MachineOperand &MO = MI.getOperand(i);
if (MO.isReg()) {
MO.setReg(Pred[j].getReg());
// If DefIdx does not exist in the model (e.g. implicit defs), then return
// unit latency (defaultDefLatency may be too conservative).
#ifndef NDEBUG
- if (SCDesc->isValid() && !DefMI->getOperand(DefOperIdx).isImplicit()
- && !DefMI->getDesc().OpInfo[DefOperIdx].isOptionalDef()
- && SchedModel.isComplete()) {
+ if (SCDesc->isValid() && !DefMI->getOperand(DefOperIdx).isImplicit() &&
+ !DefMI->getDesc().operands()[DefOperIdx].isOptionalDef() &&
+ SchedModel.isComplete()) {
errs() << "DefIdx " << DefIdx << " exceeds machine model writes for "
<< *DefMI << " (Try with MCSchedModel.CompleteModel set to false)";
llvm_unreachable("incomplete machine model");
InputDecls.push_back(OpDecl);
InputDeclsAddressOf.push_back(Operand.needAddressOf());
InputConstraints.push_back(Constraint.str());
- if (Desc.OpInfo[i - 1].isBranchTarget())
+ if (Desc.operands()[i - 1].isBranchTarget())
AsmStrRewrites.emplace_back(AOK_CallInput, Start, SymName.size(), 0,
Restricted);
else
InputDecls.push_back(OpDecl);
InputDeclsAddressOf.push_back(Operand.needAddressOf());
InputConstraints.push_back(Constraint.str());
- if (Desc.OpInfo[i - 1].isBranchTarget())
+ if (Desc.operands()[i - 1].isBranchTarget())
AsmStrRewrites.emplace_back(AOK_CallInput, Start, SymName.size());
else
AsmStrRewrites.emplace_back(AOK_Input, Start, SymName.size());
if (!Op.isReg())
continue;
- if (MCDesc.OpInfo[CurrentDef].isOptionalDef()) {
+ if (MCDesc.operands()[CurrentDef].isOptionalDef()) {
OptionalDefIdx = CurrentDef++;
continue;
}
} while (I != ChainBegin);
// Make sure we allocate in-order, to get the cheapest registers first.
- unsigned RegClassID = ChainBegin->getDesc().OpInfo[0].RegClass;
+ unsigned RegClassID = ChainBegin->getDesc().operands()[0].RegClass;
auto Ord = RCI.getOrder(TRI->getRegClass(RegClassID));
for (auto Reg : Ord) {
if (!Units.available(Reg))
// operand for the accumulator (ZA) or implicit immediate zero which isn't
// encoded, manually insert operand.
for (unsigned i = 0; i < Desc.getNumOperands(); i++) {
- if (Desc.OpInfo[i].OperandType == MCOI::OPERAND_REGISTER) {
- switch (Desc.OpInfo[i].RegClass) {
+ if (Desc.operands()[i].OperandType == MCOI::OPERAND_REGISTER) {
+ switch (Desc.operands()[i].RegClass) {
default:
break;
case AArch64::MPRRegClassID:
MI.insert(MI.begin() + i, MCOperand::createReg(AArch64::ZT0));
break;
}
- } else if (Desc.OpInfo[i].OperandType ==
+ } else if (Desc.operands()[i].OperandType ==
AArch64::OPERAND_IMPLICIT_IMM_0) {
MI.insert(MI.begin() + i, MCOperand::createImm(0));
}
// condition code) and cbz (where it is a register).
const auto &Desc = Info->get(Inst.getOpcode());
for (unsigned i = 0, e = Inst.getNumOperands(); i != e; i++) {
- if (Desc.OpInfo[i].OperandType == MCOI::OPERAND_PCREL) {
+ if (Desc.operands()[i].OperandType == MCOI::OPERAND_PCREL) {
int64_t Imm = Inst.getOperand(i).getImm();
if (Inst.getOpcode() == AArch64::ADRP)
Target = (Addr & -4096) + Imm * 4096;
unsigned OpIdx = Desc.getNumDefs() + OpNo;
if (OpIdx >= Desc.getNumOperands())
return nullptr;
- int RegClass = Desc.OpInfo[OpIdx].RegClass;
+ int RegClass = Desc.operands()[OpIdx].RegClass;
if (RegClass == -1)
return nullptr;
}
APInt Literal(64, Val);
- uint8_t OpTy = InstDesc.OpInfo[OpNum].OperandType;
+ uint8_t OpTy = InstDesc.operands()[OpNum].OperandType;
if (Imm.IsFPImm) { // We got fp literal token
switch (OpTy) {
case 4:
return AMDGPU::isInlinableLiteral32(Val, hasInv2PiInlineImm());
case 2: {
- const unsigned OperandType = Desc.OpInfo[OpIdx].OperandType;
+ const unsigned OperandType = Desc.operands()[OpIdx].OperandType;
if (OperandType == AMDGPU::OPERAND_REG_IMM_INT16 ||
OperandType == AMDGPU::OPERAND_REG_INLINE_C_INT16 ||
OperandType == AMDGPU::OPERAND_REG_INLINE_AC_INT16)
}
} else { // Expression or a literal
- if (Desc.OpInfo[OpIdx].OperandType == MCOI::OPERAND_IMMEDIATE)
+ if (Desc.operands()[OpIdx].OperandType == MCOI::OPERAND_IMMEDIATE)
continue; // special operand like VINTERP attr_chan
// An instruction may use only one literal.
return true;
const MCRegisterInfo *TRI = getContext().getRegisterInfo();
- if (TRI->getRegClass(Desc.OpInfo[0].RegClass).getSizeInBits() <= 128)
+ if (TRI->getRegClass(Desc.operands()[0].RegClass).getSizeInBits() <= 128)
return true;
if (TRI->regsOverlap(Src2Reg, DstReg)) {
}
static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
+ return
// 1. This operand is input modifiers
- return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
+ Desc.operands()[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
// 2. This is not last operand
&& Desc.NumOperands > (OpNum + 1)
// 3. Next operand is register class
- && Desc.OpInfo[OpNum + 1].RegClass != -1
+ && Desc.operands()[OpNum + 1].RegClass != -1
// 4. Next register is not tied to any other operand
- && Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
+ && Desc.getOperandConstraint(OpNum + 1,
+ MCOI::OperandConstraint::TIED_TO) == -1;
}
void AMDGPUAsmParser::cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands)
} else if (Op.isReg()) {
Op.addRegOperands(Inst, 1);
} else if (Op.isImm() &&
- Desc.OpInfo[Inst.getNumOperands()].RegClass != -1) {
+ Desc.operands()[Inst.getNumOperands()].RegClass != -1) {
assert(!Op.IsImmKindLiteral() && "Cannot use literal with DPP");
Op.addImmOperands(Inst, 1);
} else if (Op.isImm()) {
} else {
for (unsigned i = 0; i < NSAArgs; ++i) {
const unsigned VAddrIdx = VAddr0Idx + 1 + i;
- auto VAddrRCID = MCII->get(MI.getOpcode()).OpInfo[VAddrIdx].RegClass;
+ auto VAddrRCID =
+ MCII->get(MI.getOpcode()).operands()[VAddrIdx].RegClass;
MI.insert(MI.begin() + VAddrIdx,
createRegOperand(VAddrRCID, Bytes[i]));
}
// Widen the register to the correct number of enabled channels.
unsigned NewVdata = AMDGPU::NoRegister;
if (DstSize != Info->VDataDwords) {
- auto DataRCID = MCII->get(NewOpcode).OpInfo[VDataIdx].RegClass;
+ auto DataRCID = MCII->get(NewOpcode).operands()[VDataIdx].RegClass;
// Get first subregister of VData
unsigned Vdata0 = MI.getOperand(VDataIdx).getReg();
unsigned VAddrSub0 = MRI.getSubReg(VAddr0, AMDGPU::sub0);
VAddr0 = (VAddrSub0 != 0) ? VAddrSub0 : VAddr0;
- auto AddrRCID = MCII->get(NewOpcode).OpInfo[VAddr0Idx].RegClass;
+ auto AddrRCID = MCII->get(NewOpcode).operands()[VAddr0Idx].RegClass;
NewVAddr0 = MRI.getMatchingSuperReg(VAddr0, AMDGPU::sub0,
&MRI.getRegClass(AddrRCID));
if (NewVAddr0 == AMDGPU::NoRegister)
assert(DescNumOps == MI.getNumOperands());
for (unsigned I = 0; I < DescNumOps; ++I) {
auto &Op = MI.getOperand(I);
- auto OpType = Desc.OpInfo[I].OperandType;
+ auto OpType = Desc.operands()[I].OperandType;
bool IsDeferredOp = (OpType == AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED ||
OpType == AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED);
if (Op.isImm() && Op.getImm() == AMDGPU::EncValues::LITERAL_CONST &&
int VDataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
int VDataRCID = -1;
if (VDataIdx != -1)
- VDataRCID = Desc.OpInfo[VDataIdx].RegClass;
+ VDataRCID = Desc.operands()[VDataIdx].RegClass;
if (TII->isMUBUF(MI) || TII->isMTBUF(MI)) {
// There is no hazard if the instruction does not use vector regs
if (TII->isMIMG(MI)) {
int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc);
assert(SRsrcIdx != -1 &&
- AMDGPU::getRegBitWidth(Desc.OpInfo[SRsrcIdx].RegClass) == 256);
+ AMDGPU::getRegBitWidth(Desc.operands()[SRsrcIdx].RegClass) == 256);
(void)SRsrcIdx;
}
if (TII->isFLAT(MI)) {
int DataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
- if (AMDGPU::getRegBitWidth(Desc.OpInfo[DataIdx].RegClass) > 64)
+ if (AMDGPU::getRegBitWidth(Desc.operands()[DataIdx].RegClass) > 64)
return DataIdx;
}
return true;
} else {
const MCInstrDesc &InstDesc = I.getDesc();
- const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo];
+ const MCOperandInfo &OpInfo = InstDesc.operands()[OpNo];
if (!TII.isInlineConstant(Op, OpInfo))
return true;
}
// Check if operand register class contains register used.
// Intention: print disassembler message when invalid code is decoded,
// for example sgpr register used in VReg or VISrc(VReg or imm) operand.
- int RCID = Desc.OpInfo[OpNo].RegClass;
+ int RCID = Desc.operands()[OpNo].RegClass;
if (RCID != -1) {
const MCRegisterClass RC = MRI.getRegClass(RCID);
auto Reg = mc2PseudoReg(Op.getReg());
}
}
} else if (Op.isImm()) {
- const uint8_t OpTy = Desc.OpInfo[OpNo].OperandType;
+ const uint8_t OpTy = Desc.operands()[OpNo].OperandType;
switch (OpTy) {
case AMDGPU::OPERAND_REG_IMM_INT32:
case AMDGPU::OPERAND_REG_IMM_FP32:
O << "0.0";
else {
const MCInstrDesc &Desc = MII.get(MI->getOpcode());
- int RCID = Desc.OpInfo[OpNo].RegClass;
+ int RCID = Desc.operands()[OpNo].RegClass;
unsigned RCBits = AMDGPU::getRegBitWidth(MRI.getRegClass(RCID));
if (RCBits == 32)
printImmediate32(FloatToBits(Value), STI, O);
AMDGPU::OpName::src0);
if (Src0Idx >= 0 &&
- Desc.OpInfo[Src0Idx].RegClass == AMDGPU::VReg_64RegClassID &&
+ Desc.operands()[Src0Idx].RegClass == AMDGPU::VReg_64RegClassID &&
!AMDGPU::isLegal64BitDPPControl(Imm)) {
O << " /* 64 bit dpp only supports row_newbcast */";
return;
bool evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size,
uint64_t &Target) const override {
if (Inst.getNumOperands() == 0 || !Inst.getOperand(0).isImm() ||
- Info->get(Inst.getOpcode()).OpInfo[0].OperandType !=
+ Info->get(Inst.getOpcode()).operands()[0].OperandType !=
MCOI::OPERAND_PCREL)
return false;
// Is this operand a literal immediate?
const MCOperand &Op = MI.getOperand(i);
- auto Enc = getLitEncoding(Op, Desc.OpInfo[i], STI);
+ auto Enc = getLitEncoding(Op, Desc.operands()[i], STI);
if (!Enc || *Enc != 255)
continue;
return;
} else {
const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
- auto Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI);
+ auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI);
if (Enc && *Enc != 255) {
Op = *Enc | SDWA9EncValues::SRC_SGPR_MASK;
return;
const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
if (AMDGPU::isSISrcOperand(Desc, OpNo)) {
- if (auto Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI)) {
+ if (auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI)) {
Op = *Enc;
return;
}
if (!(Val & SISrcMods::OP_SEL_0) && (Val & SISrcMods::OP_SEL_1)) {
// Only apply the following transformation if that operand requires
// a packed immediate.
- switch (TII->get(Opcode).OpInfo[OpNo].OperandType) {
+ switch (TII->get(Opcode).operands()[OpNo].OperandType) {
case AMDGPU::OPERAND_REG_IMM_V2FP16:
case AMDGPU::OPERAND_REG_IMM_V2INT16:
case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
// scalar instruction
if (TII->isSALU(MI->getOpcode())) {
const MCInstrDesc &InstDesc = MI->getDesc();
- const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo];
+ const MCOperandInfo &OpInfo = InstDesc.operands()[OpNo];
// Fine if the operand can be encoded as an inline constant
if (!OpToFold->isReg() && !TII->isInlineConstant(*OpToFold, OpInfo)) {
const MachineOperand &OpToFold, MachineInstr *UseMI, unsigned UseOpIdx,
SmallVectorImpl<FoldCandidate> &FoldList) const {
const MCInstrDesc &Desc = UseMI->getDesc();
- const MCOperandInfo *OpInfo = Desc.OpInfo;
- if (!OpInfo || UseOpIdx >= Desc.getNumOperands())
+ if (UseOpIdx >= Desc.getNumOperands())
return false;
- uint8_t OpTy = OpInfo[UseOpIdx].OperandType;
+ uint8_t OpTy = Desc.operands()[UseOpIdx].OperandType;
if ((OpTy < AMDGPU::OPERAND_REG_INLINE_AC_FIRST ||
OpTy > AMDGPU::OPERAND_REG_INLINE_AC_LAST) &&
(OpTy < AMDGPU::OPERAND_REG_INLINE_C_FIRST ||
// Don't fold into target independent nodes. Target independent opcodes
// don't have defined register classes.
- if (UseDesc.isVariadic() ||
- UseOp.isImplicit() ||
- UseDesc.OpInfo[UseOpIdx].RegClass == -1)
+ if (UseDesc.isVariadic() || UseOp.isImplicit() ||
+ UseDesc.operands()[UseOpIdx].RegClass == -1)
return;
}
const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
const TargetRegisterClass *FoldRC =
- TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);
+ TRI->getRegClass(FoldDesc.operands()[0].RegClass);
// Split 64-bit constants into 32-bits for folding.
if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
// Prefer VGPRs over AGPRs in mAI instructions where possible.
// This saves a chain-copy of registers and better balance register
// use between vgpr and agpr as agpr tuples tend to be big.
- if (MI.getDesc().OpInfo) {
+ if (!MI.getDesc().operands().empty()) {
unsigned Opc = MI.getOpcode();
const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
for (auto I : { AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo,
const MachineOperand &MO) const {
const MCInstrDesc &InstDesc = MI.getDesc();
- const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo];
+ const MCOperandInfo &OpInfo = InstDesc.operands()[OpNo];
assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal());
return false;
}
- int RegClass = Desc.OpInfo[i].RegClass;
+ int RegClass = Desc.operands()[i].RegClass;
- switch (Desc.OpInfo[i].OperandType) {
+ switch (Desc.operands()[i].OperandType) {
case MCOI::OPERAND_REGISTER:
if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) {
ErrInfo = "Illegal immediate value for operand.";
if (OpIdx == -1)
continue;
const MachineOperand &MO = MI.getOperand(OpIdx);
- if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
+ if (usesConstantBus(MRI, MO, MI.getDesc().operands()[OpIdx])) {
if (MO.isReg()) {
SGPRUsed = MO.getReg();
if (!llvm::is_contained(SGPRsUsed, SGPRUsed)) {
const MachineOperand &MO = MI.getOperand(OpIdx);
- if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
+ if (usesConstantBus(MRI, MO, MI.getDesc().operands()[OpIdx])) {
if (MO.isReg() && MO.getReg() != AMDGPU::M0) {
if (MO.getReg() != SGPRUsed)
++SGPRCount;
const MachineOperand &Src1 = MI.getOperand(Src1Idx);
if (!Src0.isReg() && !Src1.isReg() &&
- !isInlineConstant(Src0, Desc.OpInfo[Src0Idx]) &&
- !isInlineConstant(Src1, Desc.OpInfo[Src1Idx]) &&
+ !isInlineConstant(Src0, Desc.operands()[Src0Idx]) &&
+ !isInlineConstant(Src1, Desc.operands()[Src1Idx]) &&
!Src0.isIdenticalTo(Src1)) {
ErrInfo = "SOP2/SOPC instruction requires too many immediate constants";
return false;
if (Opcode != AMDGPU::V_MOV_B64_DPP_PSEUDO &&
((DstIdx >= 0 &&
- (Desc.OpInfo[DstIdx].RegClass == AMDGPU::VReg_64RegClassID ||
- Desc.OpInfo[DstIdx].RegClass == AMDGPU::VReg_64_Align2RegClassID)) ||
+ (Desc.operands()[DstIdx].RegClass == AMDGPU::VReg_64RegClassID ||
+ Desc.operands()[DstIdx].RegClass ==
+ AMDGPU::VReg_64_Align2RegClassID)) ||
((Src0Idx >= 0 &&
- (Desc.OpInfo[Src0Idx].RegClass == AMDGPU::VReg_64RegClassID ||
- Desc.OpInfo[Src0Idx].RegClass ==
+ (Desc.operands()[Src0Idx].RegClass == AMDGPU::VReg_64RegClassID ||
+ Desc.operands()[Src0Idx].RegClass ==
AMDGPU::VReg_64_Align2RegClassID)))) &&
!AMDGPU::isLegal64BitDPPControl(DC)) {
ErrInfo = "Invalid dpp_ctrl value: "
const {
if (OpNum >= TID.getNumOperands())
return nullptr;
- auto RegClass = TID.OpInfo[OpNum].RegClass;
+ auto RegClass = TID.operands()[OpNum].RegClass;
bool IsAllocatable = false;
if (TID.TSFlags & (SIInstrFlags::DS | SIInstrFlags::FLAT)) {
// vdst and vdata should be both VGPR or AGPR, same for the DS instructions
const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
const MCInstrDesc &Desc = get(MI.getOpcode());
if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
- Desc.OpInfo[OpNo].RegClass == -1) {
+ Desc.operands()[OpNo].RegClass == -1) {
Register Reg = MI.getOperand(OpNo).getReg();
if (Reg.isVirtual())
return RI.getPhysRegBaseClass(Reg);
}
- unsigned RCID = Desc.OpInfo[OpNo].RegClass;
+ unsigned RCID = Desc.operands()[OpNo].RegClass;
return adjustAllocatableRegClass(ST, RI, MRI, Desc, RCID, true);
}
MachineBasicBlock *MBB = MI.getParent();
MachineOperand &MO = MI.getOperand(OpIdx);
MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
- unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass;
+ unsigned RCID = get(MI.getOpcode()).operands()[OpIdx].RegClass;
const TargetRegisterClass *RC = RI.getRegClass(RCID);
unsigned Size = RI.getRegSizeInBits(*RC);
unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO : AMDGPU::V_MOV_B32_e32;
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
const MCInstrDesc &InstDesc = MI.getDesc();
- const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx];
+ const MCOperandInfo &OpInfo = InstDesc.operands()[OpIdx];
const TargetRegisterClass *DefinedRC =
OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr;
if (!MO)
if (Op.isReg()) {
RegSubRegPair SGPR(Op.getReg(), Op.getSubReg());
if (!SGPRsUsed.count(SGPR) &&
- usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) {
+ // FIXME: This can access off the end of the operands() array.
+ usesConstantBus(MRI, Op, InstDesc.operands().begin()[i])) {
if (--ConstantBusLimit <= 0)
return false;
SGPRsUsed.insert(SGPR);
}
- } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32 ||
+ } else if (InstDesc.operands()[i].OperandType == AMDGPU::OPERAND_KIMM32 ||
(AMDGPU::isSISrcOperand(InstDesc, i) &&
- !isInlineConstant(Op, InstDesc.OpInfo[i]))) {
+ !isInlineConstant(Op, InstDesc.operands()[i]))) {
if (!LiteralLimit--)
return false;
if (--ConstantBusLimit <= 0)
// VOP2 src0 instructions support all operand types, so we don't need to check
// their legality. If src1 is already legal, we don't need to do anything.
- if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1))
+ if (isLegalRegOperand(MRI, InstrDesc.operands()[Src1Idx], Src1))
return;
// Special case: V_READLANE_B32 accepts only immediate or SGPR operands for
// TODO: Other immediate-like operand kinds could be commuted if there was a
// MachineOperand::ChangeTo* for them.
if ((!Src1.isImm() && !Src1.isReg()) ||
- !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) {
+ !isLegalRegOperand(MRI, InstrDesc.operands()[Src1Idx], Src0)) {
legalizeOpWithMove(MI, Src1Idx);
return;
}
MachineOperand &MO = MI.getOperand(Idx);
if (!MO.isReg()) {
- if (isInlineConstant(MO, get(Opc).OpInfo[Idx]))
+ if (isInlineConstant(MO, get(Opc).operands()[Idx]))
continue;
if (LiteralLimit > 0 && ConstantBusLimit > 0) {
if (RsrcIdx != -1) {
// We have an MUBUF instruction
MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
- unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass;
+ unsigned RsrcRC = get(MI.getOpcode()).operands()[RsrcIdx].RegClass;
if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()),
RI.getRegClass(RsrcRC))) {
// The operands are legal.
// Is this operand statically required to be an SGPR based on the operand
// constraints?
- const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
+ const TargetRegisterClass *OpRC =
+ RI.getRegClass(Desc.operands()[Idx].RegClass);
bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
if (IsRequiredSGPR)
return MO.getReg();
bool HasLiteral = false;
for (int I = 0, E = MI.getNumExplicitOperands(); I != E; ++I) {
const MachineOperand &Op = MI.getOperand(I);
- const MCOperandInfo &OpInfo = Desc.OpInfo[I];
+ const MCOperandInfo &OpInfo = Desc.operands()[I];
if (!Op.isReg() && !isInlineConstant(Op, OpInfo)) {
HasLiteral = true;
break;
if (Idx == -1) // e.g. s_memtime
return false;
- const auto RCID = MI.getDesc().OpInfo[Idx].RegClass;
+ const auto RCID = MI.getDesc().operands()[Idx].RegClass;
return RI.getRegClass(RCID)->hasSubClassEq(&AMDGPU::SGPR_128RegClass);
}
const MachineOperand &DefMO) const {
assert(UseMO.getParent() == &MI);
int OpIdx = MI.getOperandNo(&UseMO);
- if (!MI.getDesc().OpInfo || OpIdx >= MI.getDesc().NumOperands) {
+ if (OpIdx >= MI.getDesc().NumOperands)
return false;
- }
- return isInlineConstant(DefMO, MI.getDesc().OpInfo[OpIdx]);
+ return isInlineConstant(DefMO, MI.getDesc().operands()[OpIdx]);
}
/// \p returns true if the operand \p OpIdx in \p MI is a valid inline
/// immediate.
bool isInlineConstant(const MachineInstr &MI, unsigned OpIdx) const {
const MachineOperand &MO = MI.getOperand(OpIdx);
- return isInlineConstant(MO, MI.getDesc().OpInfo[OpIdx].OperandType);
+ return isInlineConstant(MO, MI.getDesc().operands()[OpIdx].OperandType);
}
bool isInlineConstant(const MachineInstr &MI, unsigned OpIdx,
const MachineOperand &MO) const {
- if (!MI.getDesc().OpInfo || OpIdx >= MI.getDesc().NumOperands)
+ if (OpIdx >= MI.getDesc().NumOperands)
return false;
if (MI.isCopy()) {
return isInlineConstant(MO, OpType);
}
- return isInlineConstant(MO, MI.getDesc().OpInfo[OpIdx].OperandType);
+ return isInlineConstant(MO, MI.getDesc().operands()[OpIdx].OperandType);
}
bool isInlineConstant(const MachineOperand &MO) const {
/// Return the size in bytes of the operand OpNo on the given
// instruction opcode.
unsigned getOpSize(uint16_t Opcode, unsigned OpNo) const {
- const MCOperandInfo &OpInfo = get(Opcode).OpInfo[OpNo];
+ const MCOperandInfo &OpInfo = get(Opcode).operands()[OpNo];
if (OpInfo.RegClass == -1) {
// If this is an immediate operand, this must be a 32-bit literal.
continue;
unsigned I = MI.getOperandNo(&Op);
- if (Desc.OpInfo[I].RegClass == -1 ||
- !TRI->isVSSuperClass(TRI->getRegClass(Desc.OpInfo[I].RegClass)))
+ if (Desc.operands()[I].RegClass == -1 ||
+ !TRI->isVSSuperClass(TRI->getRegClass(Desc.operands()[I].RegClass)))
continue;
if (ST.hasSDWAScalar() && ConstantBusCount == 0 && Op.isReg() &&
auto OperandsNum = OpDesc.getNumOperands();
unsigned CompOprIdx;
for (CompOprIdx = Component::SRC1; CompOprIdx < OperandsNum; ++CompOprIdx) {
- if (OpDesc.OpInfo[CompOprIdx].OperandType == AMDGPU::OPERAND_KIMM32) {
+ if (OpDesc.operands()[CompOprIdx].OperandType == AMDGPU::OPERAND_KIMM32) {
MandatoryLiteralIdx = CompOprIdx;
break;
}
bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
assert(OpNo < Desc.NumOperands);
- unsigned OpType = Desc.OpInfo[OpNo].OperandType;
+ unsigned OpType = Desc.operands()[OpNo].OperandType;
return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
OpType <= AMDGPU::OPERAND_SRC_LAST;
}
bool isKImmOperand(const MCInstrDesc &Desc, unsigned OpNo) {
assert(OpNo < Desc.NumOperands);
- unsigned OpType = Desc.OpInfo[OpNo].OperandType;
+ unsigned OpType = Desc.operands()[OpNo].OperandType;
return OpType >= AMDGPU::OPERAND_KIMM_FIRST &&
OpType <= AMDGPU::OPERAND_KIMM_LAST;
}
bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
assert(OpNo < Desc.NumOperands);
- unsigned OpType = Desc.OpInfo[OpNo].OperandType;
+ unsigned OpType = Desc.operands()[OpNo].OperandType;
switch (OpType) {
case AMDGPU::OPERAND_REG_IMM_FP32:
case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
assert(OpNo < Desc.NumOperands);
- unsigned OpType = Desc.OpInfo[OpNo].OperandType;
+ unsigned OpType = Desc.operands()[OpNo].OperandType;
return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
}
unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
unsigned OpNo) {
assert(OpNo < Desc.NumOperands);
- unsigned RCID = Desc.OpInfo[OpNo].RegClass;
+ unsigned RCID = Desc.operands()[OpNo].RegClass;
return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
}
LLVM_READNONE
/// Convenience overload: returns the size of operand \p OpNo of \p Desc by
/// forwarding its MCOperandInfo to getOperandSize(const MCOperandInfo &).
inline unsigned getOperandSize(const MCInstrDesc &Desc, unsigned OpNo) {
  return getOperandSize(Desc.operands()[OpNo]);
}
/// Is this literal inlinable, and not one of the values intended for floating
// IT block. This affects how they are printed.
const MCInstrDesc &MCID = MI.getDesc();
if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
- assert(MCID.OpInfo[1].isOptionalDef() && "CPSR def isn't expected operand");
+ assert(MCID.operands()[1].isOptionalDef() &&
+ "CPSR def isn't expected operand");
assert((MI.getOperand(1).isDead() ||
MI.getOperand(1).getReg() != ARM::CPSR) &&
"if conversion tried to stop defining used CPSR");
// Copy all the DefMI operands, excluding its (null) predicate.
const MCInstrDesc &DefDesc = DefMI->getDesc();
for (unsigned i = 1, e = DefDesc.getNumOperands();
- i != e && !DefDesc.OpInfo[i].isPredicate(); ++i)
+ i != e && !DefDesc.operands()[i].isPredicate(); ++i)
NewMI.add(DefMI->getOperand(i));
unsigned CondCode = MI.getOperand(3).getImm();
// Any ARM instruction that sets the 's' bit should specify an optional
// "cc_out" operand in the last operand position.
- if (!MI.hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) {
+ if (!MI.hasOptionalDef() || !MCID->operands()[ccOutIdx].isOptionalDef()) {
assert(!NewOpc && "Optional cc_out operand required");
return;
}
static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID) {
for (unsigned i = 0; i < MCID.NumOperands; ++i) {
- if (ARM::isVpred(MCID.OpInfo[i].OperandType))
+ if (ARM::isVpred(MCID.operands()[i].OperandType))
return i;
}
return -1;
// to keep instructions the same shape even though one cannot
// legally be predicated, e.g. vmul.f16 vs vmul.f32.
for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
- if (MCID.OpInfo[i].isPredicate()) {
+ if (MCID.operands()[i].isPredicate()) {
if (Inst.getOperand(i).getImm() != ARMCC::AL)
return Error(Loc, "instruction is not predicable");
break;
// Find the optional-def operand (cc_out).
unsigned OpNo;
for (OpNo = 0;
- !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands;
+ !MCID.operands()[OpNo].isOptionalDef() && OpNo < MCID.NumOperands;
++OpNo)
;
// If we're parsing Thumb1, reject it completely.
}
for (unsigned I = 0; I < MCID.NumOperands; ++I)
- if (MCID.OpInfo[I].RegClass == ARM::rGPRRegClassID) {
+ if (MCID.operands()[I].RegClass == ARM::rGPRRegClassID) {
// rGPRRegClass excludes PC, and also excluded SP before ARMv8
const auto &Op = Inst.getOperand(I);
if (!Op.isReg()) {
MCInst::iterator I = MI.begin();
for (unsigned i = 0; i < MCID.NumOperands; ++i, ++I) {
if (I == MI.end()) break;
- if (MCID.OpInfo[i].isOptionalDef() &&
- MCID.OpInfo[i].RegClass == ARM::CCRRegClassID) {
- if (i > 0 && MCID.OpInfo[i - 1].isPredicate())
+ if (MCID.operands()[i].isOptionalDef() &&
+ MCID.operands()[i].RegClass == ARM::CCRRegClassID) {
+ if (i > 0 && MCID.operands()[i - 1].isPredicate())
continue;
MI.insert(I, MCOperand::createReg(InITBlock ? 0 : ARM::CPSR));
return;
bool ARMDisassembler::isVectorPredicable(const MCInst &MI) const {
const MCInstrDesc &MCID = MCII->get(MI.getOpcode());
for (unsigned i = 0; i < MCID.NumOperands; ++i) {
- if (ARM::isVpred(MCID.OpInfo[i].OperandType))
+ if (ARM::isVpred(MCID.operands()[i].OperandType))
return true;
}
return false;
MCInst::iterator CCI = MI.begin();
for (unsigned i = 0; i < MCID.NumOperands; ++i, ++CCI) {
- if (MCID.OpInfo[i].isPredicate() || CCI == MI.end())
+ if (MCID.operands()[i].isPredicate() || CCI == MI.end())
break;
}
MCInst::iterator VCCI = MI.begin();
unsigned VCCPos;
for (VCCPos = 0; VCCPos < MCID.NumOperands; ++VCCPos, ++VCCI) {
- if (ARM::isVpred(MCID.OpInfo[VCCPos].OperandType) || VCCI == MI.end())
+ if (ARM::isVpred(MCID.operands()[VCCPos].OperandType) || VCCI == MI.end())
break;
}
++VCCI;
VCCI = MI.insert(VCCI, MCOperand::createReg(0));
++VCCI;
- if (MCID.OpInfo[VCCPos].OperandType == ARM::OPERAND_VPRED_R) {
+ if (MCID.operands()[VCCPos].OperandType == ARM::OPERAND_VPRED_R) {
int TiedOp = MCID.getOperandConstraint(VCCPos + 3, MCOI::TIED_TO);
assert(TiedOp >= 0 &&
"Inactive register in vpred_r is not tied to an output!");
}
const MCInstrDesc &MCID = MCII->get(MI.getOpcode());
- const MCOperandInfo *OpInfo = MCID.OpInfo;
+ ArrayRef<MCOperandInfo> OpInfo = MCID.operands();
MCInst::iterator I = MI.begin();
unsigned short NumOps = MCID.NumOperands;
for (unsigned i = 0; i < NumOps; ++i, ++I) {
for (unsigned I = 0; I < MI.getNumOperands(); ++I) {
const MCOperand &MO = MI.getOperand(I);
if (MO.isReg() && MO.getReg() == ARM::CPSR &&
- Desc.OpInfo[I].isOptionalDef())
+ Desc.operands()[I].isOptionalDef())
return true;
}
return false;
// Find the PC-relative immediate operand in the instruction.
for (unsigned OpNum = 0; OpNum < Desc.getNumOperands(); ++OpNum) {
if (Inst.getOperand(OpNum).isImm() &&
- Desc.OpInfo[OpNum].OperandType == MCOI::OPERAND_PCREL) {
+ Desc.operands()[OpNum].OperandType == MCOI::OPERAND_PCREL) {
int64_t Imm = Inst.getOperand(OpNum).getImm();
Target = ARM_MC::evaluateBranchTarget(Desc, Addr, Imm);
return true;
// Find the memory addressing operand in the instruction.
unsigned OpIndex = Desc.NumDefs;
while (OpIndex < Desc.getNumOperands() &&
- Desc.OpInfo[OpIndex].OperandType != MCOI::OPERAND_MEMORY)
+ Desc.operands()[OpIndex].OperandType != MCOI::OPERAND_MEMORY)
++OpIndex;
if (OpIndex == Desc.getNumOperands())
return std::nullopt;
int llvm::findFirstVPTPredOperandIdx(const MachineInstr &MI) {
const MCInstrDesc &MCID = MI.getDesc();
- if (!MCID.OpInfo)
- return -1;
-
for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
- if (ARM::isVpred(MCID.OpInfo[i].OperandType))
+ if (ARM::isVpred(MCID.operands()[i].OperandType))
return i;
return -1;
// Transfer the rest of operands.
unsigned NumOps = MCID.getNumOperands();
for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
- if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
+ if (i < NumOps && MCID.operands()[i].isOptionalDef())
continue;
- if (SkipPred && MCID.OpInfo[i].isPredicate())
+ if (SkipPred && MCID.operands()[i].isPredicate())
continue;
MIB.add(MI->getOperand(i));
}
const MCInstrDesc &MCID = MI->getDesc();
for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
- if (MCID.OpInfo[i].isPredicate())
+ if (MCID.operands()[i].isPredicate())
continue;
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg()) {
continue;
if (Entry.LowRegs1 && !isARMLowRegister(Reg))
return false;
- } else if (MO.isImm() &&
- !MCID.OpInfo[i].isPredicate()) {
+ } else if (MO.isImm() && !MCID.operands()[i].isPredicate()) {
if (((unsigned)MO.getImm()) > Limit)
return false;
}
// Transfer the rest of operands.
unsigned NumOps = MCID.getNumOperands();
for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
- if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
+ if (i < NumOps && MCID.operands()[i].isOptionalDef())
continue;
if ((MCID.getOpcode() == ARM::t2RSBSri ||
MCID.getOpcode() == ARM::t2RSBri ||
MCID.getOpcode() == ARM::t2UXTH) && i == 2)
// Skip the zero immediate operand, it's now implicit.
continue;
- bool isPred = (i < NumOps && MCID.OpInfo[i].isPredicate());
+ bool isPred = (i < NumOps && MCID.operands()[i].isPredicate());
if (SkipPred && isPred)
continue;
const MachineOperand &MO = MI->getOperand(i);
void AVRInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- const MCOperandInfo &MOI = this->MII.get(MI->getOpcode()).OpInfo[OpNo];
+ const MCOperandInfo &MOI = this->MII.get(MI->getOpcode()).operands()[OpNo];
if (MOI.RegClass == AVR::ZREGRegClassID) {
// Special case for the Z register, which sometimes doesn't have an operand
// in the MCInst.
MCInstrDesc const &Desc = HexagonMCInstrInfo::getDesc(MCII, *ProducerInst);
const unsigned ProducerOpIndex = std::get<1>(Producer);
- if (Desc.OpInfo[ProducerOpIndex].RegClass ==
+ if (Desc.operands()[ProducerOpIndex].RegClass ==
Hexagon::DoubleRegsRegClassID) {
reportNote(ProducerInst->getLoc(),
"Double registers cannot be new-value producers");
assert(!MO.isImm());
if (MO.isReg()) {
unsigned Reg = MO.getReg();
- switch (HexagonMCInstrInfo::getDesc(MCII, MI).OpInfo[OperandNumber].RegClass) {
+ switch (HexagonMCInstrInfo::getDesc(MCII, MI)
+ .operands()[OperandNumber]
+ .RegClass) {
case GeneralSubRegsRegClassID:
case GeneralDoubleLow8RegsRegClassID:
return HexagonMCInstrInfo::getDuplexRegisterNumbering(Reg);
MCInstrDesc const &Desc = HexagonMCInstrInfo::getDesc(MCII, Inst);
return Inst.getOperand(I).isReg() &&
- Desc.OpInfo[I].RegClass == Hexagon::PredRegsRegClassID;
+ Desc.operands()[I].RegClass == Hexagon::PredRegsRegClassID;
}
/// Return whether the insn can be packaged only with A and X-type insns.
return {0, 0, false};
MCInstrDesc const &Desc = getDesc(MCII, MCI);
for (auto I = Desc.getNumDefs(), N = Desc.getNumOperands(); I != N; ++I)
- if (Desc.OpInfo[I].RegClass == Hexagon::PredRegsRegClassID)
+ if (Desc.operands()[I].RegClass == Hexagon::PredRegsRegClassID)
return {MCI.getOperand(I).getReg(), I, isPredicatedTrue(MCII, MCI)};
return {0, 0, false};
}
// Copy all the DefMI operands, excluding its (null) predicate.
const MCInstrDesc &DefDesc = DefMI->getDesc();
for (unsigned i = 1, e = DefDesc.getNumOperands();
- i != e && !DefDesc.OpInfo[i].isPredicate(); ++i)
+ i != e && !DefDesc.operands()[i].isPredicate(); ++i)
NewMI.add(DefMI->getOperand(i));
unsigned CondCode = MI.getOperand(3).getImm();
!isCall(Inst))
return false;
- if (Info->get(Inst.getOpcode()).OpInfo[0].OperandType ==
+ if (Info->get(Inst.getOpcode()).operands()[0].OperandType ==
MCOI::OPERAND_PCREL) {
int64_t Imm = Inst.getOperand(0).getImm();
Target = Addr + Size + Imm;
if (NumOp != 3 && NumOp != 4)
return false;
- const MCOperandInfo &OpInfo = MCID.OpInfo[NumOp - 1];
+ const MCOperandInfo &OpInfo = MCID.operands()[NumOp - 1];
if (OpInfo.OperandType != MCOI::OPERAND_MEMORY &&
OpInfo.OperandType != MCOI::OPERAND_UNKNOWN &&
OpInfo.OperandType != MipsII::OPERAND_MEM_SIMM9)
// Check the offset of memory operand, if it is a symbol
// reference or immediate we may have to expand instructions.
if (needsExpandMemInst(Inst, MCID)) {
- switch (MCID.OpInfo[MCID.getNumOperands() - 1].OperandType) {
+ switch (MCID.operands()[MCID.getNumOperands() - 1].OperandType) {
case MipsII::OPERAND_MEM_SIMM9:
expandMem9Inst(Inst, IDLoc, Out, STI, MCID.mayLoad());
break;
if (MCID.mayLoad() && Opcode != Mips::LWP_MM) {
// Try to create 16-bit GP relative load instruction.
for (unsigned i = 0; i < MCID.getNumOperands(); i++) {
- const MCOperandInfo &OpInfo = MCID.OpInfo[i];
+ const MCOperandInfo &OpInfo = MCID.operands()[i];
if ((OpInfo.OperandType == MCOI::OPERAND_MEMORY) ||
(OpInfo.OperandType == MCOI::OPERAND_UNKNOWN)) {
MCOperand &Op = Inst.getOperand(i);
unsigned TmpReg = DstReg;
const MCInstrDesc &Desc = MII.get(OpCode);
- int16_t DstRegClass = Desc.OpInfo[StartOp].RegClass;
+ int16_t DstRegClass = Desc.operands()[StartOp].RegClass;
unsigned DstRegClassID =
getContext().getRegisterInfo()->getRegClass(DstRegClass).getID();
bool IsGPR = (DstRegClassID == Mips::GPR32RegClassID) ||
unsigned TmpReg = DstReg;
const MCInstrDesc &Desc = MII.get(OpCode);
- int16_t DstRegClass = Desc.OpInfo[StartOp].RegClass;
+ int16_t DstRegClass = Desc.operands()[StartOp].RegClass;
unsigned DstRegClassID =
getContext().getRegisterInfo()->getRegClass(DstRegClass).getID();
bool IsGPR = (DstRegClassID == Mips::GPR32RegClassID) ||
unsigned NumOps = Inst.getNumOperands();
if (NumOps == 0)
return false;
- switch (Info->get(Inst.getOpcode()).OpInfo[NumOps - 1].OperandType) {
+ switch (Info->get(Inst.getOpcode()).operands()[NumOps - 1].OperandType) {
case MCOI::OPERAND_UNKNOWN:
case MCOI::OPERAND_IMMEDIATE: {
// j, jal, jalx, jals
uint64_t &Target) const override {
unsigned NumOps = Inst.getNumOperands();
if (NumOps == 0 ||
- Info->get(Inst.getOpcode()).OpInfo[NumOps - 1].OperandType !=
+ Info->get(Inst.getOpcode()).operands()[NumOps - 1].OperandType !=
MCOI::OPERAND_PCREL)
return false;
Target = Addr + Inst.getOperand(NumOps - 1).getImm() * Size;
assert(UseIdx < UseMI.getNumOperands() && "Cannot find Reg in UseMI");
assert(UseIdx < UseMCID.getNumOperands() && "No operand description for Reg");
- const MCOperandInfo *UseInfo = &UseMCID.OpInfo[UseIdx];
+ const MCOperandInfo *UseInfo = &UseMCID.operands()[UseIdx];
// We can fold the zero if this register requires a GPRC_NOR0/G8RC_NOX0
// register (which might also be specified as a pointer class kind).
/// operands).
static unsigned getRegNumForOperand(const MCInstrDesc &Desc, unsigned Reg,
unsigned OpNo) {
- int16_t regClass = Desc.OpInfo[OpNo].RegClass;
+ int16_t regClass = Desc.operands()[OpNo].RegClass;
switch (regClass) {
// We store F0-F31, VF0-VF31 in MCOperand and it should be F0-F31,
// VSX32-VSX63 during encoding/disassembling
const unsigned NumFixedOps = MCDesc.getNumOperands();
const unsigned LastFixedIndex = NumFixedOps - 1;
const int FirstVariableIndex = NumFixedOps;
- if (NumFixedOps > 0 &&
- MCDesc.OpInfo[LastFixedIndex].OperandType == MCOI::OPERAND_UNKNOWN) {
+ if (NumFixedOps > 0 && MCDesc.operands()[LastFixedIndex].OperandType ==
+ MCOI::OPERAND_UNKNOWN) {
// For instructions where a custom type (not reg or immediate) comes as
// the last operand before the variable_ops. This is usually a StringImm
// operand, but there are a few other cases.
// If we define an output, and have at least one other argument.
if (MCDesc.getNumDefs() == 1 && MCDesc.getNumOperands() >= 2) {
// Check if we define an ID, and take a type as operand 1.
- auto DefOpInfo = MCDesc.opInfo_begin();
- auto FirstArgOpInfo = MCDesc.opInfo_begin() + 1;
- return (DefOpInfo->RegClass == SPIRV::IDRegClassID ||
- DefOpInfo->RegClass == SPIRV::ANYIDRegClassID) &&
- FirstArgOpInfo->RegClass == SPIRV::TYPERegClassID;
+ auto &DefOpInfo = MCDesc.operands()[0];
+ auto &FirstArgOpInfo = MCDesc.operands()[1];
+ return (DefOpInfo.RegClass == SPIRV::IDRegClassID ||
+ DefOpInfo.RegClass == SPIRV::ANYIDRegClassID) &&
+ FirstArgOpInfo.RegClass == SPIRV::TYPERegClassID;
}
return false;
}
// to FP conversion.
const MCInstrDesc &MCID = MI.getDesc();
for (unsigned I = 0, E = MCID.getNumOperands(); I != E; ++I) {
- const MCOperandInfo &MCOI = MCID.OpInfo[I];
+ const MCOperandInfo &MCOI = MCID.operands()[I];
if (MCOI.OperandType != MCOI::OPERAND_REGISTER || I == OpNum)
continue;
const TargetRegisterClass *RC = TRI->getRegClass(MCOI.RegClass);
if (I >= MCID.getNumOperands())
break;
const MachineOperand &Op = MI.getOperand(I);
- const MCOperandInfo &MCOI = MCID.OpInfo[I];
+ const MCOperandInfo &MCOI = MCID.operands()[I];
// Addressing modes have register and immediate operands. Op should be a
// register (or frame index) operand if MCOI.RegClass contains a valid
// register class, or an immediate otherwise.
const auto &II = MII.get(RegOpc);
// First pop all the uses off the stack and check them.
for (unsigned I = II.getNumOperands(); I > II.getNumDefs(); I--) {
- const auto &Op = II.OpInfo[I - 1];
+ const auto &Op = II.operands()[I - 1];
if (Op.OperandType == MCOI::OPERAND_REGISTER) {
auto VT = WebAssembly::regClassToValType(Op.RegClass);
if (popType(ErrorLoc, VT))
}
// Now push all the defs onto the stack.
for (unsigned I = 0; I < II.getNumDefs(); I++) {
- const auto &Op = II.OpInfo[I];
+ const auto &Op = II.operands()[I];
assert(Op.OperandType == MCOI::OPERAND_REGISTER && "Register expected");
auto VT = WebAssembly::regClassToValType(Op.RegClass);
Stack.push_back(VT);
// See if this operand denotes a basic block target.
if (I < NumFixedOperands) {
// A non-variable_ops operand, check its type.
- if (Desc.OpInfo[I].OperandType != WebAssembly::OPERAND_BASIC_BLOCK)
+ if (Desc.operands()[I].OperandType != WebAssembly::OPERAND_BASIC_BLOCK)
continue;
} else {
// A variable_ops operand, which currently can be immediates (used in
} else if (MO.isImm()) {
if (I < Desc.getNumOperands()) {
- const MCOperandInfo &Info = Desc.OpInfo[I];
+ const MCOperandInfo &Info = Desc.operands()[I];
LLVM_DEBUG(dbgs() << "Encoding immediate: type="
<< int(Info.OperandType) << "\n");
switch (Info.OperandType) {
uint64_t D = MO.getDFPImm();
support::endian::write<uint64_t>(OS, D, support::little);
} else if (MO.isExpr()) {
- const MCOperandInfo &Info = Desc.OpInfo[I];
+ const MCOperandInfo &Info = Desc.operands()[I];
llvm::MCFixupKind FixupKind;
size_t PaddedSize = 5;
switch (Info.OperandType) {
case MachineOperand::MO_Immediate: {
unsigned DescIndex = I - NumVariadicDefs;
if (DescIndex < Desc.NumOperands) {
- const MCOperandInfo &Info = Desc.OpInfo[DescIndex];
+ const MCOperandInfo &Info = Desc.operands()[DescIndex];
if (Info.OperandType == WebAssembly::OPERAND_TYPEINDEX) {
SmallVector<wasm::ValType, 4> Returns;
SmallVector<wasm::ValType, 4> Params;
assert((*MI.memoperands_begin())->getSize() ==
(UINT64_C(1) << WebAssembly::GetDefaultP2Align(MI.getOpcode())) &&
"Default p2align value should be natural");
- assert(MI.getDesc().OpInfo[OperandNo].OperandType ==
+ assert(MI.getDesc().operands()[OperandNo].OperandType ==
WebAssembly::OPERAND_P2ALIGN &&
"Load and store instructions should have a p2align operand");
uint64_t P2Align = Log2((*MI.memoperands_begin())->getAlign());
bool X86MCInstrAnalysis::evaluateBranch(const MCInst &Inst, uint64_t Addr,
uint64_t Size, uint64_t &Target) const {
if (Inst.getNumOperands() == 0 ||
- Info->get(Inst.getOpcode()).OpInfo[0].OperandType != MCOI::OPERAND_PCREL)
+ Info->get(Inst.getOpcode()).operands()[0].OperandType !=
+ MCOI::OPERAND_PCREL)
return false;
Target = Addr + Size + Inst.getOperand(0).getImm();
return true;
const MachineOperand &MaskOp = MI->getOperand(MaskIdx);
if (auto *C = getConstantFromPool(*MI, MaskOp)) {
- unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);
+ unsigned Width = getRegisterWidth(MI->getDesc().operands()[0]);
SmallVector<int, 64> Mask;
DecodePSHUFBMask(C, Width, Mask);
if (!Mask.empty())
const MachineOperand &MaskOp = MI->getOperand(MaskIdx);
if (auto *C = getConstantFromPool(*MI, MaskOp)) {
- unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);
+ unsigned Width = getRegisterWidth(MI->getDesc().operands()[0]);
SmallVector<int, 16> Mask;
DecodeVPERMILPMask(C, ElSize, Width, Mask);
if (!Mask.empty())
const MachineOperand &MaskOp = MI->getOperand(3 + X86::AddrDisp);
if (auto *C = getConstantFromPool(*MI, MaskOp)) {
- unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);
+ unsigned Width = getRegisterWidth(MI->getDesc().operands()[0]);
SmallVector<int, 16> Mask;
DecodeVPERMIL2PMask(C, (unsigned)CtrlOp.getImm(), ElSize, Width, Mask);
if (!Mask.empty())
const MachineOperand &MaskOp = MI->getOperand(3 + X86::AddrDisp);
if (auto *C = getConstantFromPool(*MI, MaskOp)) {
- unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);
+ unsigned Width = getRegisterWidth(MI->getDesc().operands()[0]);
SmallVector<int, 16> Mask;
DecodeVPPERMMask(C, Width, Mask);
if (!Mask.empty())
SmallVector<Operand, 8> Operands;
SmallVector<Variable, 4> Variables;
for (; OpIndex < Description->getNumOperands(); ++OpIndex) {
- const auto &OpInfo = Description->opInfo_begin()[OpIndex];
+ const auto &OpInfo = Description->operands()[OpIndex];
Operand Operand;
Operand.Index = OpIndex;
Operand.IsDef = (OpIndex < Description->getNumDefs());