TargetMaxInstBytes(Ctx.getAsmInfo()->getMaxInstLength(&STI)) {
// ToDo: AMDGPUDisassembler supports only VI ISA.
- if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding] && !isGFX10Plus())
+ if (!STI.hasFeature(AMDGPU::FeatureGCN3Encoding) && !isGFX10Plus())
report_fatal_error("Disassembly not yet supported for subtarget");
}
if (Bytes.size() >= 8) {
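// eatBytes<T>() reads a little-endian T and consumes those bytes from the
// stream; the same 64-bit word is then probed against each table in turn.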
const uint64_t QW = eatBytes<uint64_t>(Bytes);
- if (STI.getFeatureBits()[AMDGPU::FeatureGFX10_BEncoding]) {
+ if (STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding)) {
Res = tryDecodeInst(DecoderTableGFX10_B64, MI, QW, Address);
if (Res) {
if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dpp8)
Res = tryDecodeInst(DecoderTableSDWA1064, MI, QW, Address);
if (Res) { IsSDWA = true; break; }
- if (STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem]) {
+ if (STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem)) {
Res = tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address);
if (Res)
break;
// Some GFX9 subtargets repurposed the v_mad_mix_f32, v_mad_mixlo_f16 and
// v_mad_mixhi_f16 for FMA variants. Try to decode using this special
// table first so we print the correct name.
- if (STI.getFeatureBits()[AMDGPU::FeatureFmaMixInsts]) {
+ if (STI.hasFeature(AMDGPU::FeatureFmaMixInsts)) {
Res = tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address);
if (Res)
break;
Res = tryDecodeInst(DecoderTableGFX932, MI, DW, Address);
if (Res) break;
- if (STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts]) {
+ if (STI.hasFeature(AMDGPU::FeatureGFX90AInsts)) {
Res = tryDecodeInst(DecoderTableGFX90A32, MI, DW, Address);
if (Res)
break;
}
- if (STI.getFeatureBits()[AMDGPU::FeatureGFX10_BEncoding]) {
+ if (STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding)) {
Res = tryDecodeInst(DecoderTableGFX10_B32, MI, DW, Address);
if (Res) break;
}
if (Bytes.size() < 4) break;
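// Build the 64-bit candidate from the 32 bits already consumed (DW) in the
// low half and the next word in the high half.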
const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
- if (STI.getFeatureBits()[AMDGPU::FeatureGFX940Insts]) {
+ if (STI.hasFeature(AMDGPU::FeatureGFX940Insts)) {
Res = tryDecodeInst(DecoderTableGFX94064, MI, QW, Address);
if (Res)
break;
}
- if (STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts]) {
+ if (STI.hasFeature(AMDGPU::FeatureGFX90AInsts)) {
Res = tryDecodeInst(DecoderTableGFX90A64, MI, QW, Address);
if (Res)
break;
if (Res && (MCII->get(MI.getOpcode()).TSFlags &
(SIInstrFlags::MTBUF | SIInstrFlags::MUBUF)) &&
- (STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts])) {
+ (STI.hasFeature(AMDGPU::FeatureGFX90AInsts))) {
// GFX90A lost TFE, its place is occupied by ACC.
int TFEOpIdx =
AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);
}
DecodeStatus AMDGPUDisassembler::convertEXPInst(MCInst &MI) const {
- if (STI.getFeatureBits()[AMDGPU::FeatureGFX11]) {
+ if (STI.hasFeature(AMDGPU::FeatureGFX11)) {
// The MCInst still has these fields even though they are no longer encoded
// in the GFX11 instruction.
insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::vm);
}
DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
- if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
- STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
+ if (STI.hasFeature(AMDGPU::FeatureGFX9) ||
+ STI.hasFeature(AMDGPU::FeatureGFX10)) {
if (AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::sdst))
// VOPC - insert clamp
insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
- } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
+ } else if (STI.hasFeature(AMDGPU::FeatureVolcanicIslands)) {
int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
if (SDst != -1) {
// VOPC - insert VCC register as sdst
using namespace AMDGPU::SDWA;
using namespace AMDGPU::EncValues;
- if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
- STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
+ if (STI.hasFeature(AMDGPU::FeatureGFX9) ||
+ STI.hasFeature(AMDGPU::FeatureGFX10)) {
// XXX: cast to int is needed to avoid stupid warning:
// compare with unsigned is always true
if (int(SDWA9EncValues::SRC_VGPR_MIN) <= int(Val) &&
return decodeFPImmed(ImmWidth, SVal);
return decodeSpecialReg32(SVal);
- } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
+ } else if (STI.hasFeature(AMDGPU::FeatureVolcanicIslands)) {
return createRegOperand(getVgprClassId(Width), Val);
}
llvm_unreachable("unsupported target");
MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
using namespace AMDGPU::SDWA;
- assert((STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
- STI.getFeatureBits()[AMDGPU::FeatureGFX10]) &&
+ assert((STI.hasFeature(AMDGPU::FeatureGFX9) ||
+ STI.hasFeature(AMDGPU::FeatureGFX10)) &&
"SDWAVopcDst should be present only on GFX9+");
- bool IsWave64 = STI.getFeatureBits()[AMDGPU::FeatureWavefrontSize64];
+ bool IsWave64 = STI.hasFeature(AMDGPU::FeatureWavefrontSize64);
if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
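// When the flag bit is set the destination is an explicit SGPR rather than
// VCC; mask off the flag to recover the register index.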
Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
}
MCOperand AMDGPUDisassembler::decodeBoolReg(unsigned Val) const {
- return STI.getFeatureBits()[AMDGPU::FeatureWavefrontSize64]
+ return STI.hasFeature(AMDGPU::FeatureWavefrontSize64)
? decodeSrcOp(OPW64, Val)
: decodeSrcOp(OPW32, Val);
}
bool AMDGPUDisassembler::isVI() const {
- return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
+ return STI.hasFeature(AMDGPU::FeatureVolcanicIslands);
}
bool AMDGPUDisassembler::isGFX9() const { return AMDGPU::isGFX9(STI); }
bool AMDGPUDisassembler::isGFX90A() const {
- return STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts];
+ return STI.hasFeature(AMDGPU::FeatureGFX90AInsts);
}
bool AMDGPUDisassembler::isGFX9Plus() const { return AMDGPU::isGFX9Plus(STI); }
}
bool AMDGPUDisassembler::isGFX11() const {
- return STI.getFeatureBits()[AMDGPU::FeatureGFX11];
+ return STI.hasFeature(AMDGPU::FeatureGFX11);
}
bool AMDGPUDisassembler::isGFX11Plus() const {
bool AMDGPUDisassembler::hasArchitectedFlatScratch() const {
- return STI.getFeatureBits()[AMDGPU::FeatureArchitectedFlatScratch];
+ return STI.hasFeature(AMDGPU::FeatureArchitectedFlatScratch);
}
//===----------------------------------------------------------------------===//
bool AMDGPUAsmBackend::mayNeedRelaxation(const MCInst &Inst,
const MCSubtargetInfo &STI) const {
- if (!STI.getFeatureBits()[AMDGPU::FeatureOffset3fBug])
+ if (!STI.hasFeature(AMDGPU::FeatureOffset3fBug))
return false;
if (AMDGPU::getSOPPWithRelaxation(Inst.getOpcode()) >= 0)
else if (Imm == 0xC400)
O<< "-4.0";
else if (Imm == 0x3118 &&
- STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm]) {
+ STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm)) {
O << "0.15915494";
} else {
uint64_t Imm16 = static_cast<uint16_t>(Imm);
else if (Imm == llvm::bit_cast<uint32_t>(-4.0f))
O << "-4.0";
else if (Imm == 0x3e22f983 &&
- STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
+ STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
O << "0.15915494";
else
O << formatHex(static_cast<uint64_t>(Imm));
else if (Imm == llvm::bit_cast<uint64_t>(-4.0))
O << "-4.0";
else if (Imm == 0x3fc45f306dc9c882 &&
- STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
+ STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
O << "0.15915494309189532";
else {
assert(isUInt<32>(Imm) || isInt<32>(Imm));
raw_ostream &O) {
if (!FirstOperand)
O << ", ";
- printRegOperand(STI.getFeatureBits()[AMDGPU::FeatureWavefrontSize64]
+ printRegOperand(STI.hasFeature(AMDGPU::FeatureWavefrontSize64)
? AMDGPU::VCC
: AMDGPU::VCC_LO,
O, MRI);
case AMDGPU::OPERAND_REG_IMM_V2INT16:
case AMDGPU::OPERAND_REG_IMM_V2FP16:
if (!isUInt<16>(Op.getImm()) &&
- STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal]) {
+ STI.hasFeature(AMDGPU::FeatureVOP3Literal)) {
printImmediate32(Op.getImm(), STI, O);
break;
}
return MaxInstLength;
// Maximum for NSA encoded images
- if (STI->getFeatureBits()[AMDGPU::FeatureNSAEncoding])
+ if (STI->hasFeature(AMDGPU::FeatureNSAEncoding))
return 20;
// 64-bit instruction with 32-bit literal.
- if (STI->getFeatureBits()[AMDGPU::FeatureVOP3Literal])
+ if (STI->hasFeature(AMDGPU::FeatureVOP3Literal))
return 12;
return 8;
} else if (IS_VTX(Desc)) {
uint64_t InstWord01 = getBinaryCodeForInstr(MI, Fixups, STI);
uint32_t InstWord2 = MI.getOperand(2).getImm(); // Offset
- if (!(STI.getFeatureBits()[R600::FeatureCaymanISA])) {
+ if (!(STI.hasFeature(R600::FeatureCaymanISA))) {
InstWord2 |= 1 << 19; // Mega-Fetch bit
}
Emit((uint32_t) 0, OS);
} else {
uint64_t Inst = getBinaryCodeForInstr(MI, Fixups, STI);
- if ((STI.getFeatureBits()[R600::FeatureR600ALUInst]) &&
+ if ((STI.hasFeature(R600::FeatureR600ALUInst)) &&
((Desc.TSFlags & R600_InstFlag::OP1) ||
Desc.TSFlags & R600_InstFlag::OP2)) {
uint64_t ISAOpCode = Inst & (0x3FFULL << 39);
return 247;
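// 248 is the inline-constant encoding for 1/(2*pi); 255 means the operand
// must be emitted as a trailing literal.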
if (Val == 0x3118 && // 1.0 / (2.0 * pi)
- STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
+ STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
return 248;
return 255;
return 247;
if (Val == 0x3e22f983 && // 1.0 / (2.0 * pi)
- STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
+ STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
return 248;
return 255;
return 247;
if (Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi)
- STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
+ STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
return 248;
return 255;
return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
case AMDGPU::OPERAND_REG_IMM_V2INT16:
case AMDGPU::OPERAND_REG_IMM_V2FP16: {
- if (!isUInt<16>(Imm) && STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal])
+ if (!isUInt<16>(Imm) && STI.hasFeature(AMDGPU::FeatureVOP3Literal))
return getLit32Encoding(static_cast<uint32_t>(Imm), STI);
if (OpInfo.OperandType == AMDGPU::OPERAND_REG_IMM_V2FP16)
return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
OS.write(0);
}
- if ((bytes > 8 && STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal]) ||
- (bytes > 4 && !STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal]))
+ if ((bytes > 8 && STI.hasFeature(AMDGPU::FeatureVOP3Literal)) ||
+ (bytes > 4 && !STI.hasFeature(AMDGPU::FeatureVOP3Literal)))
return;
// Do not print literals from SISrc Operands for insts with mandatory literals
}
bool hasXNACK(const MCSubtargetInfo &STI) {
- return STI.getFeatureBits()[AMDGPU::FeatureXNACK];
+ return STI.hasFeature(AMDGPU::FeatureXNACK);
}
bool hasSRAMECC(const MCSubtargetInfo &STI) {
- return STI.getFeatureBits()[AMDGPU::FeatureSRAMECC];
+ return STI.hasFeature(AMDGPU::FeatureSRAMECC);
}
bool hasMIMG_R128(const MCSubtargetInfo &STI) {
- return STI.getFeatureBits()[AMDGPU::FeatureMIMG_R128] &&
-        !STI.getFeatureBits()[AMDGPU::FeatureR128A16];
+ return STI.hasFeature(AMDGPU::FeatureMIMG_R128) &&
+        !STI.hasFeature(AMDGPU::FeatureR128A16);
}
bool hasA16(const MCSubtargetInfo &STI) {
- return STI.getFeatureBits()[AMDGPU::FeatureA16];
+ return STI.hasFeature(AMDGPU::FeatureA16);
}
bool hasG16(const MCSubtargetInfo &STI) {
- return STI.getFeatureBits()[AMDGPU::FeatureG16];
+ return STI.hasFeature(AMDGPU::FeatureG16);
}
bool hasPackedD16(const MCSubtargetInfo &STI) {
- return !STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem] && !isCI(STI) &&
+ return !STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem) && !isCI(STI) &&
!isSI(STI);
}
bool isSI(const MCSubtargetInfo &STI) {
- return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands];
+ return STI.hasFeature(AMDGPU::FeatureSouthernIslands);
}
bool isCI(const MCSubtargetInfo &STI) {
- return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands];
+ return STI.hasFeature(AMDGPU::FeatureSeaIslands);
}
bool isVI(const MCSubtargetInfo &STI) {
- return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
+ return STI.hasFeature(AMDGPU::FeatureVolcanicIslands);
}
bool isGFX9(const MCSubtargetInfo &STI) {
- return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
+ return STI.hasFeature(AMDGPU::FeatureGFX9);
}
bool isGFX9_GFX10(const MCSubtargetInfo &STI) {
}
bool isGFX10(const MCSubtargetInfo &STI) {
- return STI.getFeatureBits()[AMDGPU::FeatureGFX10];
+ return STI.hasFeature(AMDGPU::FeatureGFX10);
}
bool isGFX10Plus(const MCSubtargetInfo &STI) {
}
bool isGFX11(const MCSubtargetInfo &STI) {
- return STI.getFeatureBits()[AMDGPU::FeatureGFX11];
+ return STI.hasFeature(AMDGPU::FeatureGFX11);
}
bool isGFX11Plus(const MCSubtargetInfo &STI) {
}
bool isGCN3Encoding(const MCSubtargetInfo &STI) {
- return STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding];
+ return STI.hasFeature(AMDGPU::FeatureGCN3Encoding);
}
bool isGFX10_AEncoding(const MCSubtargetInfo &STI) {
- return STI.getFeatureBits()[AMDGPU::FeatureGFX10_AEncoding];
+ return STI.hasFeature(AMDGPU::FeatureGFX10_AEncoding);
}
bool isGFX10_BEncoding(const MCSubtargetInfo &STI) {
- return STI.getFeatureBits()[AMDGPU::FeatureGFX10_BEncoding];
+ return STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding);
}
bool hasGFX10_3Insts(const MCSubtargetInfo &STI) {
- return STI.getFeatureBits()[AMDGPU::FeatureGFX10_3Insts];
+ return STI.hasFeature(AMDGPU::FeatureGFX10_3Insts);
}
bool isGFX90A(const MCSubtargetInfo &STI) {
- return STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts];
+ return STI.hasFeature(AMDGPU::FeatureGFX90AInsts);
}
bool isGFX940(const MCSubtargetInfo &STI) {
- return STI.getFeatureBits()[AMDGPU::FeatureGFX940Insts];
+ return STI.hasFeature(AMDGPU::FeatureGFX940Insts);
}
bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI) {
- return STI.getFeatureBits()[AMDGPU::FeatureArchitectedFlatScratch];
+ return STI.hasFeature(AMDGPU::FeatureArchitectedFlatScratch);
}
bool hasMAIInsts(const MCSubtargetInfo &STI) {
- return STI.getFeatureBits()[AMDGPU::FeatureMAIInsts];
+ return STI.hasFeature(AMDGPU::FeatureMAIInsts);
}
bool hasVOPD(const MCSubtargetInfo &STI) {
- return STI.getFeatureBits()[AMDGPU::FeatureVOPD];
+ return STI.hasFeature(AMDGPU::FeatureVOPD);
}
int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR,
}
static bool isThumb(const MCSubtargetInfo& STI) {
- return STI.getFeatureBits()[ARM::ModeThumb];
+ return STI.hasFeature(ARM::ModeThumb);
}
void ARMAsmPrinter::emitInlineAsmEnd(const MCSubtargetInfo &StartInfo,
}
bool ARMBaseInstrInfo::hasNOP() const {
- return Subtarget.getFeatureBits()[ARM::HasV6KOps];
+ return Subtarget.hasFeature(ARM::HasV6KOps);
}
bool ARMBaseInstrInfo::isSwiftFastImmShift(const MachineInstr *MI) const {
bool isThumb() const {
// FIXME: Can tablegen auto-generate this?
- return getSTI().getFeatureBits()[ARM::ModeThumb];
+ return getSTI().hasFeature(ARM::ModeThumb);
}
bool isThumbOne() const {
- return isThumb() && !getSTI().getFeatureBits()[ARM::FeatureThumb2];
+ return isThumb() && !getSTI().hasFeature(ARM::FeatureThumb2);
}
bool isThumbTwo() const {
- return isThumb() && getSTI().getFeatureBits()[ARM::FeatureThumb2];
+ return isThumb() && getSTI().hasFeature(ARM::FeatureThumb2);
}
bool hasThumb() const {
- return getSTI().getFeatureBits()[ARM::HasV4TOps];
+ return getSTI().hasFeature(ARM::HasV4TOps);
}
bool hasThumb2() const {
- return getSTI().getFeatureBits()[ARM::FeatureThumb2];
+ return getSTI().hasFeature(ARM::FeatureThumb2);
}
bool hasV6Ops() const {
- return getSTI().getFeatureBits()[ARM::HasV6Ops];
+ return getSTI().hasFeature(ARM::HasV6Ops);
}
bool hasV6T2Ops() const {
- return getSTI().getFeatureBits()[ARM::HasV6T2Ops];
+ return getSTI().hasFeature(ARM::HasV6T2Ops);
}
bool hasV6MOps() const {
- return getSTI().getFeatureBits()[ARM::HasV6MOps];
+ return getSTI().hasFeature(ARM::HasV6MOps);
}
bool hasV7Ops() const {
- return getSTI().getFeatureBits()[ARM::HasV7Ops];
+ return getSTI().hasFeature(ARM::HasV7Ops);
}
bool hasV8Ops() const {
- return getSTI().getFeatureBits()[ARM::HasV8Ops];
+ return getSTI().hasFeature(ARM::HasV8Ops);
}
bool hasV8MBaseline() const {
- return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps];
+ return getSTI().hasFeature(ARM::HasV8MBaselineOps);
}
bool hasV8MMainline() const {
- return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps];
+ return getSTI().hasFeature(ARM::HasV8MMainlineOps);
}
bool hasV8_1MMainline() const {
- return getSTI().getFeatureBits()[ARM::HasV8_1MMainlineOps];
+ return getSTI().hasFeature(ARM::HasV8_1MMainlineOps);
}
bool hasMVE() const {
- return getSTI().getFeatureBits()[ARM::HasMVEIntegerOps];
+ return getSTI().hasFeature(ARM::HasMVEIntegerOps);
}
bool hasMVEFloat() const {
- return getSTI().getFeatureBits()[ARM::HasMVEFloatOps];
+ return getSTI().hasFeature(ARM::HasMVEFloatOps);
}
bool hasCDE() const {
- return getSTI().getFeatureBits()[ARM::HasCDEOps];
+ return getSTI().hasFeature(ARM::HasCDEOps);
}
bool has8MSecExt() const {
- return getSTI().getFeatureBits()[ARM::Feature8MSecExt];
+ return getSTI().hasFeature(ARM::Feature8MSecExt);
}
bool hasARM() const {
- return !getSTI().getFeatureBits()[ARM::FeatureNoARM];
+ return !getSTI().hasFeature(ARM::FeatureNoARM);
}
bool hasDSP() const {
- return getSTI().getFeatureBits()[ARM::FeatureDSP];
+ return getSTI().hasFeature(ARM::FeatureDSP);
}
bool hasD32() const {
- return getSTI().getFeatureBits()[ARM::FeatureD32];
+ return getSTI().hasFeature(ARM::FeatureD32);
}
bool hasV8_1aOps() const {
- return getSTI().getFeatureBits()[ARM::HasV8_1aOps];
+ return getSTI().hasFeature(ARM::HasV8_1aOps);
}
bool hasRAS() const {
- return getSTI().getFeatureBits()[ARM::FeatureRAS];
+ return getSTI().hasFeature(ARM::FeatureRAS);
}
void SwitchMode() {
void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);
bool isMClass() const {
- return getSTI().getFeatureBits()[ARM::FeatureMClass];
+ return getSTI().hasFeature(ARM::FeatureMClass);
}
/// @name Auto-generated Match Functions
ARMDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx,
const MCInstrInfo *MCII)
: MCDisassembler(STI, Ctx), MCII(MCII) {
- InstructionEndianness = STI.getFeatureBits()[ARM::ModeBigEndianInstructions]
+ InstructionEndianness = STI.hasFeature(ARM::ModeBigEndianInstructions)
? llvm::support::big
: llvm::support::little;
}
// In Arm state, instructions are always 4 bytes wide, so there's no
// point in skipping any smaller number of bytes if an instruction
// can't be decoded.
- if (!STI.getFeatureBits()[ARM::ModeThumb])
+ if (!STI.hasFeature(ARM::ModeThumb))
return 4;
// In a Thumb instruction stream, a halfword is a standalone 2-byte
ArrayRef<uint8_t> Bytes,
uint64_t Address,
raw_ostream &CS) const {
- if (STI.getFeatureBits()[ARM::ModeThumb])
+ if (STI.hasFeature(ARM::ModeThumb))
return getThumbInstruction(MI, Size, Bytes, Address, CS);
return getARMInstruction(MI, Size, Bytes, Address, CS);
}
raw_ostream &CS) const {
CommentStream = &CS;
- assert(!STI.getFeatureBits()[ARM::ModeThumb] &&
+ assert(!STI.hasFeature(ARM::ModeThumb) &&
"Asked to disassemble an ARM instruction but Subtarget is in Thumb "
"mode!");
raw_ostream &CS) const {
CommentStream = &CS;
- assert(STI.getFeatureBits()[ARM::ModeThumb] &&
+ assert(STI.hasFeature(ARM::ModeThumb) &&
"Asked to disassemble in Thumb mode but Subtarget is in ARM mode!");
// We want to read exactly 2 bytes of data.
unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op,
const MCSubtargetInfo &STI) const {
- bool HasThumb2 = STI.getFeatureBits()[ARM::FeatureThumb2];
- bool HasV8MBaselineOps = STI.getFeatureBits()[ARM::HasV8MBaselineOps];
+ bool HasThumb2 = STI.hasFeature(ARM::FeatureThumb2);
+ bool HasV8MBaselineOps = STI.hasFeature(ARM::HasV8MBaselineOps);
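// Thumb2 and v8-M Baseline gate which wide encodings are legal relaxation
// targets in the switch below.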
switch (Op) {
default:
}
case ARM::fixup_arm_thumb_bl: {
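// The BL displacement is 25-bit (+/-16 MiB) when Thumb2, v8-M Baseline, or
// v6-M is present, and only 23-bit (+/-4 MiB) otherwise.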
if (!isInt<25>(Value - 4) ||
- (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
- !STI->getFeatureBits()[ARM::HasV8MBaselineOps] &&
- !STI->getFeatureBits()[ARM::HasV6MOps] &&
+ (!STI->hasFeature(ARM::FeatureThumb2) &&
+ !STI->hasFeature(ARM::HasV8MBaselineOps) &&
+ !STI->hasFeature(ARM::HasV6MOps) &&
!isInt<23>(Value - 4))) {
Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
return 0;
// On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise we
// could have an error on our hands.
assert(STI != nullptr);
- if (!STI->getFeatureBits()[ARM::FeatureThumb2] && IsResolved) {
+ if (!STI->hasFeature(ARM::FeatureThumb2) && IsResolved) {
const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
if (FixupDiagnostic) {
Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
case ARM::fixup_arm_thumb_br:
// Offset by 4 and don't encode the lower bit, which is always 0.
assert(STI != nullptr);
- if (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
- !STI->getFeatureBits()[ARM::HasV8MBaselineOps]) {
+ if (!STI->hasFeature(ARM::FeatureThumb2) &&
+ !STI->hasFeature(ARM::HasV8MBaselineOps)) {
const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
if (FixupDiagnostic) {
Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
case ARM::fixup_arm_thumb_bcc:
// Offset by 4 and don't encode the lower bit, which is always 0.
assert(STI != nullptr);
- if (!STI->getFeatureBits()[ARM::FeatureThumb2]) {
+ if (!STI->hasFeature(ARM::FeatureThumb2)) {
const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
if (FixupDiagnostic) {
Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
}
bool hasNOP(const MCSubtargetInfo *STI) const {
- return STI->getFeatureBits()[ARM::HasV6T2Ops];
+ return STI->hasFeature(ARM::HasV6T2Ops);
}
std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;
const MCSubtargetInfo &STI,
raw_ostream &O) {
unsigned val = MI->getOperand(OpNum).getImm();
- O << ARM_MB::MemBOptToString(val, STI.getFeatureBits()[ARM::HasV8Ops]);
+ O << ARM_MB::MemBOptToString(val, STI.hasFeature(ARM::HasV8Ops));
}
void ARMInstPrinter::printInstSyncBOption(const MCInst *MI, unsigned OpNum,
~ARMMCCodeEmitter() override = default;
bool isThumb(const MCSubtargetInfo &STI) const {
- return STI.getFeatureBits()[ARM::ModeThumb];
+ return STI.hasFeature(ARM::ModeThumb);
}
bool isThumb2(const MCSubtargetInfo &STI) const {
- return isThumb(STI) && STI.getFeatureBits()[ARM::FeatureThumb2];
+ return isThumb(STI) && STI.hasFeature(ARM::FeatureThumb2);
}
bool isTargetMachO(const MCSubtargetInfo &STI) const {
// the encodings all refer to Q-registers by their literal
// register number.
- if (STI.getFeatureBits()[ARM::HasMVEIntegerOps])
+ if (STI.hasFeature(ARM::HasMVEIntegerOps))
return RegNo;
switch (Reg) {
static bool getMCRDeprecationInfo(MCInst &MI, const MCSubtargetInfo &STI,
std::string &Info) {
- if (STI.getFeatureBits()[llvm::ARM::HasV7Ops] &&
+ if (STI.hasFeature(llvm::ARM::HasV7Ops) &&
(MI.getOperand(0).isImm() && MI.getOperand(0).getImm() == 15) &&
(MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) &&
// Checks for the deprecated CP15ISB encoding:
return true;
}
}
- if (STI.getFeatureBits()[llvm::ARM::HasV7Ops] &&
+ if (STI.hasFeature(llvm::ARM::HasV7Ops) &&
((MI.getOperand(0).isImm() && MI.getOperand(0).getImm() == 10) ||
(MI.getOperand(0).isImm() && MI.getOperand(0).getImm() == 11))) {
Info = "since v7, cp10 and cp11 are reserved for advanced SIMD or floating "
static bool getMRCDeprecationInfo(MCInst &MI, const MCSubtargetInfo &STI,
std::string &Info) {
- if (STI.getFeatureBits()[llvm::ARM::HasV7Ops] &&
+ if (STI.hasFeature(llvm::ARM::HasV7Ops) &&
((MI.getOperand(0).isImm() && MI.getOperand(0).getImm() == 10) ||
(MI.getOperand(0).isImm() && MI.getOperand(0).getImm() == 11))) {
Info = "since v7, cp10 and cp11 are reserved for advanced SIMD or floating "
static bool getARMStoreDeprecationInfo(MCInst &MI, const MCSubtargetInfo &STI,
std::string &Info) {
- assert(!STI.getFeatureBits()[llvm::ARM::ModeThumb] &&
+ assert(!STI.hasFeature(llvm::ARM::ModeThumb) &&
"cannot predicate thumb instructions");
assert(MI.getNumOperands() >= 4 && "expected >= 4 arguments");
static bool getARMLoadDeprecationInfo(MCInst &MI, const MCSubtargetInfo &STI,
std::string &Info) {
- assert(!STI.getFeatureBits()[llvm::ARM::ModeThumb] &&
+ assert(!STI.hasFeature(llvm::ARM::ModeThumb) &&
"cannot predicate thumb instructions");
assert(MI.getNumOperands() >= 4 && "expected >= 4 arguments");
// VLDR* instructions share the same opcode (and thus the same form) for Arm
// and Thumb. Use a bit longer route through STI in that case.
case ARMII::VFPLdStFrm:
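// The PC reads as the instruction address plus 4 in Thumb state and plus 8
// in Arm state.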
- Addr += STI->getFeatureBits()[ARM::ModeThumb] ? 4 : 8;
+ Addr += STI->hasFeature(ARM::ModeThumb) ? 4 : 8;
break;
}
return MCDisassembler::Fail;
// Try to decode AVRTiny instructions.
- if (STI.getFeatureBits()[AVR::FeatureTinyEncoding]) {
+ if (STI.hasFeature(AVR::FeatureTinyEncoding)) {
Result = decodeInstruction(DecoderTableAVRTiny16, Instr, Insn, Address,
this, STI);
if (Result != MCDisassembler::Fail)
if ((InstClass == BPF_LDX || InstClass == BPF_STX) &&
getInstSize(Insn) != BPF_DW &&
(InstMode == BPF_MEM || InstMode == BPF_ATOMIC) &&
- STI.getFeatureBits()[BPF::ALU32])
+ STI.hasFeature(BPF::ALU32))
Result = decodeInstruction(DecoderTableBPFALU3264, Instr, Insn, Address,
this, STI);
else
if (isUInt<8>(Inst.getOperand(1).getImm()) &&
Inst.getOperand(0).getReg() <= CSKY::R7) {
Opcode = CSKY::MOVI16;
- } else if (getSTI().getFeatureBits()[CSKY::HasE2] &&
+ } else if (getSTI().hasFeature(CSKY::HasE2) &&
isUInt<16>(Inst.getOperand(1).getImm())) {
Opcode = CSKY::MOVI32;
} else {
const MCDisassembler *DisAsm,
const MCSubtargetInfo &STI) {
LLVM_DEBUG(dbgs() << "Trying CSKY 32-bit fpuv3 table :\n");
- if (!STI.getFeatureBits()[CSKY::FeatureFPUV3_HF] &&
- !STI.getFeatureBits()[CSKY::FeatureFPUV3_SF] &&
- !STI.getFeatureBits()[CSKY::FeatureFPUV3_DF])
+ if (!STI.hasFeature(CSKY::FeatureFPUV3_HF) &&
+ !STI.hasFeature(CSKY::FeatureFPUV3_SF) &&
+ !STI.hasFeature(CSKY::FeatureFPUV3_DF))
return false;
DecodeStatus Result =
case CSKY::JBT32:
case CSKY::JBF32:
case CSKY::JBSR32:
- if (!STI.getFeatureBits()[CSKY::Has2E3])
+ if (!STI.hasFeature(CSKY::Has2E3))
return false;
return true;
case CSKY::JBR16:
case CSKY::JBF16:
// ck801
unsigned opcode;
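// With E2 the 32-bit conditional branches are available; bare ck801 falls
// back to the JBT_E/JBF_E forms instead.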
- if (STI.getFeatureBits()[CSKY::HasE2])
+ if (STI.hasFeature(CSKY::HasE2))
opcode = Inst.getOpcode() == CSKY::JBT16 ? CSKY::JBT32 : CSKY::JBF32;
else
opcode = Inst.getOpcode() == CSKY::JBT16 ? CSKY::JBT_E : CSKY::JBF_E;
if (Reg == CSKY::C)
O << "";
- else if (STI.getFeatureBits()[CSKY::FeatureJAVA]) {
+ else if (STI.hasFeature(CSKY::FeatureJAVA)) {
if (Reg == CSKY::R23)
O << (useABIName ? "fp" : "r23");
else if (Reg == CSKY::R24)
Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
writeData(Binary, 2, OS);
- if (!STI.getFeatureBits()[CSKY::Has2E3])
+ if (!STI.hasFeature(CSKY::Has2E3))
TmpInst = MCInstBuilder(CSKY::BR32)
.addOperand(MI.getOperand(1))
.addOperand(MI.getOperand(2));
} else if (Option.compare_insensitive("endloop1") == 0) {
HexagonMCInstrInfo::setOuterLoop(MCB);
} else if (Option.compare_insensitive("mem_noshuf") == 0) {
- if (getSTI().getFeatureBits()[Hexagon::FeatureMemNoShuf])
+ if (getSTI().hasFeature(Hexagon::FeatureMemNoShuf))
HexagonMCInstrInfo::setMemReorderDisabled(MCB);
else
return getParser().Error(IDLoc, MemNoShuffMsg);
// validate register against architecture
bool HexagonAsmParser::RegisterMatchesArch(unsigned MatchNum) const {
if (HexagonMCRegisterClasses[Hexagon::V62RegsRegClassID].contains(MatchNum))
- if (!getSTI().getFeatureBits()[Hexagon::ArchV62])
+ if (!getSTI().hasFeature(Hexagon::ArchV62))
return false;
return true;
}
break;
case Hexagon::J2_trap1:
- if (!getSTI().getFeatureBits()[Hexagon::ArchV65]) {
+ if (!getSTI().hasFeature(Hexagon::ArchV65)) {
MCOperand &Rx = Inst.getOperand(0);
MCOperand &Ry = Inst.getOperand(1);
if (Rx.getReg() != Hexagon::R0 || Ry.getReg() != Hexagon::R0) {
STI);
if (Result != MCDisassembler::Success &&
- STI.getFeatureBits()[Hexagon::ExtensionHVX])
+ STI.hasFeature(Hexagon::ExtensionHVX))
Result = decodeInstruction(DecoderTableEXT_mmvec32, MI, Instruction,
Address, this, STI);
// All Hexagon architectures have prediction bits on dot-new branches,
// but only Hexagon V60+ has prediction bits on dot-old ones. Make sure
// to pick the right opcode when converting back to dot-old.
- if (!Subtarget.getFeatureBits()[Hexagon::ArchV60]) {
+ if (!Subtarget.hasFeature(Hexagon::ArchV60)) {
switch (NewOp) {
case Hexagon::J2_jumptpt:
NewOp = Hexagon::J2_jumpt;
const bool IgnoreTmpDst = (HexagonMCInstrInfo::hasTmpDst(MCII, MCI) ||
HexagonMCInstrInfo::hasHvxTmp(MCII, MCI)) &&
- STI.getFeatureBits()[Hexagon::ArchV69];
+ STI.hasFeature(Hexagon::ArchV69);
// Get implicit register definitions.
for (MCPhysReg R : MCID.implicit_defs()) {
}
bool HexagonMCChecker::checkValidTmpDst() {
- if (!STI.getFeatureBits()[Hexagon::ArchV69]) {
+ if (!STI.hasFeature(Hexagon::ArchV69)) {
return true;
}
auto HasTmp = [&](MCInst const &I) {
}
bool HexagonMCChecker::checkLegalVecRegPair() {
- const bool IsPermitted = STI.getFeatureBits()[Hexagon::ArchV67];
+ const bool IsPermitted = STI.hasFeature(Hexagon::ArchV67);
const bool HasReversePairs = ReversePairs.size() != 0;
if (!IsPermitted && HasReversePairs) {
HexagonMCShuffle(Context, false, MCII, STI, MCB);
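// A duplex packs two compact sub-instructions into one 32-bit word; only
// search for candidates when the target supports the feature.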
const SmallVector<DuplexCandidate, 8> possibleDuplexes =
- (STI.getFeatureBits()[Hexagon::FeatureDuplex])
+ (STI.hasFeature(Hexagon::FeatureDuplex))
? HexagonMCInstrInfo::getDuplexPossibilties(MCII, STI, MCB)
: SmallVector<DuplexCandidate, 8>();
}
unsigned HexagonMCInstrInfo::packetSizeSlots(MCSubtargetInfo const &STI) {
- const bool IsTiny = STI.getFeatureBits()[Hexagon::ProcTinyCore];
+ const bool IsTiny = STI.hasFeature(Hexagon::ProcTinyCore);
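// Tiny cores expose one fewer slot per packet.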
return IsTiny ? (HEXAGON_PACKET_SIZE - 1) : HEXAGON_PACKET_SIZE;
}
}
static void LLVM_ATTRIBUTE_UNUSED clearFeature(MCSubtargetInfo* STI, uint64_t F) {
- if (STI->getFeatureBits()[F])
+ if (STI->hasFeature(F))
STI->ToggleFeature(F);
}
static bool LLVM_ATTRIBUTE_UNUSED checkFeature(MCSubtargetInfo* STI, uint64_t F) {
- return STI->getFeatureBits()[F];
+ return STI->hasFeature(F);
}
namespace {
case CallingConv::Fast:
break;
case CallingConv::GHC:
- if (!MF.getSubtarget().getFeatureBits()[LoongArch::FeatureBasicF] ||
- !MF.getSubtarget().getFeatureBits()[LoongArch::FeatureBasicD])
+ if (!MF.getSubtarget().hasFeature(LoongArch::FeatureBasicF) ||
+ !MF.getSubtarget().hasFeature(LoongArch::FeatureBasicD))
report_fatal_error(
"GHC calling convention requires the F and D extensions");
}
}
void setFeatureBits(uint64_t Feature, StringRef FeatureString) {
- if (!(getSTI().getFeatureBits()[Feature])) {
+ if (!(getSTI().hasFeature(Feature))) {
MCSubtargetInfo &STI = copySTI();
setAvailableFeatures(
ComputeAvailableFeatures(STI.ToggleFeature(FeatureString)));
}
void clearFeatureBits(uint64_t Feature, StringRef FeatureString) {
- if (getSTI().getFeatureBits()[Feature]) {
+ if (getSTI().hasFeature(Feature)) {
MCSubtargetInfo &STI = copySTI();
setAvailableFeatures(
ComputeAvailableFeatures(STI.ToggleFeature(FeatureString)));
bool hasEightFccRegisters() const { return hasMips4() || hasMips32(); }
bool isGP64bit() const {
- return getSTI().getFeatureBits()[Mips::FeatureGP64Bit];
+ return getSTI().hasFeature(Mips::FeatureGP64Bit);
}
bool isFP64bit() const {
- return getSTI().getFeatureBits()[Mips::FeatureFP64Bit];
+ return getSTI().hasFeature(Mips::FeatureFP64Bit);
}
bool isJalrRelocAvailable(const MCExpr *JalExpr) {
bool isABI_N64() const { return ABI.IsN64(); }
bool isABI_O32() const { return ABI.IsO32(); }
bool isABI_FPXX() const {
- return getSTI().getFeatureBits()[Mips::FeatureFPXX];
+ return getSTI().hasFeature(Mips::FeatureFPXX);
}
bool useOddSPReg() const {
- return !(getSTI().getFeatureBits()[Mips::FeatureNoOddSPReg]);
+ return !(getSTI().hasFeature(Mips::FeatureNoOddSPReg));
}
bool inMicroMipsMode() const {
- return getSTI().getFeatureBits()[Mips::FeatureMicroMips];
+ return getSTI().hasFeature(Mips::FeatureMicroMips);
}
bool hasMips1() const {
- return getSTI().getFeatureBits()[Mips::FeatureMips1];
+ return getSTI().hasFeature(Mips::FeatureMips1);
}
bool hasMips2() const {
- return getSTI().getFeatureBits()[Mips::FeatureMips2];
+ return getSTI().hasFeature(Mips::FeatureMips2);
}
bool hasMips3() const {
- return getSTI().getFeatureBits()[Mips::FeatureMips3];
+ return getSTI().hasFeature(Mips::FeatureMips3);
}
bool hasMips4() const {
- return getSTI().getFeatureBits()[Mips::FeatureMips4];
+ return getSTI().hasFeature(Mips::FeatureMips4);
}
bool hasMips5() const {
- return getSTI().getFeatureBits()[Mips::FeatureMips5];
+ return getSTI().hasFeature(Mips::FeatureMips5);
}
bool hasMips32() const {
- return getSTI().getFeatureBits()[Mips::FeatureMips32];
+ return getSTI().hasFeature(Mips::FeatureMips32);
}
bool hasMips64() const {
- return getSTI().getFeatureBits()[Mips::FeatureMips64];
+ return getSTI().hasFeature(Mips::FeatureMips64);
}
bool hasMips32r2() const {
- return getSTI().getFeatureBits()[Mips::FeatureMips32r2];
+ return getSTI().hasFeature(Mips::FeatureMips32r2);
}
bool hasMips64r2() const {
- return getSTI().getFeatureBits()[Mips::FeatureMips64r2];
+ return getSTI().hasFeature(Mips::FeatureMips64r2);
}
bool hasMips32r3() const {
- return (getSTI().getFeatureBits()[Mips::FeatureMips32r3]);
+ return (getSTI().hasFeature(Mips::FeatureMips32r3));
}
bool hasMips64r3() const {
- return (getSTI().getFeatureBits()[Mips::FeatureMips64r3]);
+ return (getSTI().hasFeature(Mips::FeatureMips64r3));
}
bool hasMips32r5() const {
- return (getSTI().getFeatureBits()[Mips::FeatureMips32r5]);
+ return (getSTI().hasFeature(Mips::FeatureMips32r5));
}
bool hasMips64r5() const {
- return (getSTI().getFeatureBits()[Mips::FeatureMips64r5]);
+ return (getSTI().hasFeature(Mips::FeatureMips64r5));
}
bool hasMips32r6() const {
- return getSTI().getFeatureBits()[Mips::FeatureMips32r6];
+ return getSTI().hasFeature(Mips::FeatureMips32r6);
}
bool hasMips64r6() const {
- return getSTI().getFeatureBits()[Mips::FeatureMips64r6];
+ return getSTI().hasFeature(Mips::FeatureMips64r6);
}
bool hasDSP() const {
- return getSTI().getFeatureBits()[Mips::FeatureDSP];
+ return getSTI().hasFeature(Mips::FeatureDSP);
}
bool hasDSPR2() const {
- return getSTI().getFeatureBits()[Mips::FeatureDSPR2];
+ return getSTI().hasFeature(Mips::FeatureDSPR2);
}
bool hasDSPR3() const {
- return getSTI().getFeatureBits()[Mips::FeatureDSPR3];
+ return getSTI().hasFeature(Mips::FeatureDSPR3);
}
bool hasMSA() const {
- return getSTI().getFeatureBits()[Mips::FeatureMSA];
+ return getSTI().hasFeature(Mips::FeatureMSA);
}
bool hasCnMips() const {
- return (getSTI().getFeatureBits()[Mips::FeatureCnMips]);
+ return (getSTI().hasFeature(Mips::FeatureCnMips));
}
bool hasCnMipsP() const {
- return (getSTI().getFeatureBits()[Mips::FeatureCnMipsP]);
+ return (getSTI().hasFeature(Mips::FeatureCnMipsP));
}
bool inPicMode() {
}
bool inMips16Mode() const {
- return getSTI().getFeatureBits()[Mips::FeatureMips16];
+ return getSTI().hasFeature(Mips::FeatureMips16);
}
bool useTraps() const {
- return getSTI().getFeatureBits()[Mips::FeatureUseTCCInDIV];
+ return getSTI().hasFeature(Mips::FeatureUseTCCInDIV);
}
bool useSoftFloat() const {
- return getSTI().getFeatureBits()[Mips::FeatureSoftFloat];
+ return getSTI().hasFeature(Mips::FeatureSoftFloat);
}
bool hasMT() const {
- return getSTI().getFeatureBits()[Mips::FeatureMT];
+ return getSTI().hasFeature(Mips::FeatureMT);
}
bool hasCRC() const {
- return getSTI().getFeatureBits()[Mips::FeatureCRC];
+ return getSTI().hasFeature(Mips::FeatureCRC);
}
bool hasVirt() const {
- return getSTI().getFeatureBits()[Mips::FeatureVirt];
+ return getSTI().hasFeature(Mips::FeatureVirt);
}
bool hasGINV() const {
- return getSTI().getFeatureBits()[Mips::FeatureGINV];
+ return getSTI().hasFeature(Mips::FeatureGINV);
}
/// Warn if RegIndex is the same as the current AT.
(Res.getSymA()->getSymbol().isELF() &&
cast<MCSymbolELF>(Res.getSymA()->getSymbol()).getBinding() ==
ELF::STB_LOCAL);
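// The XGOT (large-GOT) expansion is only used for external symbols; local
// symbols keep the ordinary GOT access sequence.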
- bool UseXGOT = STI->getFeatureBits()[Mips::FeatureXGOT] && !IsLocalSym;
+ bool UseXGOT = STI->hasFeature(Mips::FeatureXGOT) && !IsLocalSym;
// The case where the result register is $25 is somewhat special. If the
// symbol in the final relocation is external and not modified with a
public:
MipsDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx, bool IsBigEndian)
: MCDisassembler(STI, Ctx),
- IsMicroMips(STI.getFeatureBits()[Mips::FeatureMicroMips]),
+ IsMicroMips(STI.hasFeature(Mips::FeatureMicroMips)),
IsBigEndian(IsBigEndian) {}
- bool hasMips2() const { return STI.getFeatureBits()[Mips::FeatureMips2]; }
- bool hasMips3() const { return STI.getFeatureBits()[Mips::FeatureMips3]; }
- bool hasMips32() const { return STI.getFeatureBits()[Mips::FeatureMips32]; }
+ bool hasMips2() const { return STI.hasFeature(Mips::FeatureMips2); }
+ bool hasMips3() const { return STI.hasFeature(Mips::FeatureMips3); }
+ bool hasMips32() const { return STI.hasFeature(Mips::FeatureMips32); }
bool hasMips32r6() const {
- return STI.getFeatureBits()[Mips::FeatureMips32r6];
+ return STI.hasFeature(Mips::FeatureMips32r6);
}
- bool isFP64() const { return STI.getFeatureBits()[Mips::FeatureFP64Bit]; }
+ bool isFP64() const { return STI.hasFeature(Mips::FeatureFP64Bit); }
- bool isGP64() const { return STI.getFeatureBits()[Mips::FeatureGP64Bit]; }
+ bool isGP64() const { return STI.hasFeature(Mips::FeatureGP64Bit); }
- bool isPTR64() const { return STI.getFeatureBits()[Mips::FeaturePTR64Bit]; }
+ bool isPTR64() const { return STI.hasFeature(Mips::FeaturePTR64Bit); }
- bool hasCnMips() const { return STI.getFeatureBits()[Mips::FeatureCnMips]; }
+ bool hasCnMips() const { return STI.hasFeature(Mips::FeatureCnMips); }
- bool hasCnMipsP() const { return STI.getFeatureBits()[Mips::FeatureCnMipsP]; }
+ bool hasCnMipsP() const { return STI.hasFeature(Mips::FeatureCnMipsP); }
bool hasCOP3() const {
// Only present in MIPS-I and MIPS-II
}
bool MipsMCCodeEmitter::isMicroMips(const MCSubtargetInfo &STI) const {
- return STI.getFeatureBits()[Mips::FeatureMicroMips];
+ return STI.hasFeature(Mips::FeatureMicroMips);
}
bool MipsMCCodeEmitter::isMips32r6(const MCSubtargetInfo &STI) const {
- return STI.getFeatureBits()[Mips::FeatureMips32r6];
+ return STI.hasFeature(Mips::FeatureMips32r6);
}
void MipsMCCodeEmitter::EmitByte(unsigned char C, raw_ostream &OS) const {
} // end anonymous namespace
static bool isMicroMips(const MCSubtargetInfo *STI) {
- return STI->getFeatureBits()[Mips::FeatureMicroMips];
+ return STI->hasFeature(Mips::FeatureMicroMips);
}
static bool isMips32r6(const MCSubtargetInfo *STI) {
- return STI->getFeatureBits()[Mips::FeatureMips32r6];
+ return STI->hasFeature(Mips::FeatureMips32r6);
}
MipsTargetStreamer::MipsTargetStreamer(MCStreamer &S)
break;
}
case PPC::MFTB: {
- if (getSTI().getFeatureBits()[PPC::FeatureMFTB]) {
+ if (getSTI().hasFeature(PPC::FeatureMFTB)) {
assert(Inst.getNumOperands() == 2 && "Expecting two operands");
Inst.setOpcode(PPC::MFSPR);
}
// where th can be omitted when it is 0. dcbtst is the same. We take the
// server form to be the default, so swap the operands if we're parsing for
// an embedded core (they'll be swapped again upon printing).
- if (getSTI().getFeatureBits()[PPC::FeatureBookE] &&
+ if (getSTI().hasFeature(PPC::FeatureBookE) &&
Operands.size() == 4 &&
(Name == "dcbt" || Name == "dcbtst")) {
std::swap(Operands[1], Operands[3]);
// TODO: In this function we call decodeInstruction several times with
// different decoder tables. It may be possible to only call once by
// looking at the top 6 bits of the instruction.
- if (STI.getFeatureBits()[PPC::FeaturePrefixInstrs] && Bytes.size() >= 8) {
+ if (STI.hasFeature(PPC::FeaturePrefixInstrs) && Bytes.size() >= 8) {
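// A prefixed instruction is a 4-byte prefix followed by a 4-byte base
// instruction; keep the prefix in the high 32 bits of the combined word.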
uint32_t Prefix = ReadFunc(Bytes.data());
uint32_t BaseInst = ReadFunc(Bytes.data() + 4);
uint64_t Inst = BaseInst | (uint64_t)Prefix << 32;
// Read the instruction in the proper endianness.
uint64_t Inst = ReadFunc(Bytes.data());
- if (STI.getFeatureBits()[PPC::FeatureSPE]) {
+ if (STI.hasFeature(PPC::FeatureSPE)) {
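// Probe the SPE-specific table first; anything it rejects falls through to
// the generic tables.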
DecodeStatus result =
decodeInstruction(DecoderTableSPE32, MI, Inst, Address, this, STI);
if (result != MCDisassembler::Fail)
// On AIX, only emit the extended mnemonics for dcbt and dcbtst if
// the "modern assembler" is available.
if ((MI->getOpcode() == PPC::DCBT || MI->getOpcode() == PPC::DCBTST) &&
- (!TT.isOSAIX() || STI.getFeatureBits()[PPC::FeatureModernAIXAs])) {
+ (!TT.isOSAIX() || STI.hasFeature(PPC::FeatureModernAIXAs))) {
unsigned char TH = MI->getOperand(0).getImm();
O << "\tdcbt";
if (MI->getOpcode() == PPC::DCBTST)
O << "t";
O << " ";
- bool IsBookE = STI.getFeatureBits()[PPC::FeatureBookE];
+ bool IsBookE = STI.hasFeature(PPC::FeatureBookE);
if (IsBookE && TH != 0 && TH != 16)
O << (unsigned int) TH << ", ";
// Calling the auto-generated decoder function.
- if (STI.getFeatureBits()[Sparc::FeatureV9])
+ if (STI.hasFeature(Sparc::FeatureV9))
{
Result = decodeInstruction(DecoderTableSparcV932, Instr, Insn, Address, this, STI);
}
#include "SparcGenAsmWriter.inc"
bool SparcInstPrinter::isV9(const MCSubtargetInfo &STI) const {
- return (STI.getFeatureBits()[Sparc::FeatureV9]) != 0;
+ return STI.hasFeature(Sparc::FeatureV9);
}
void SparcInstPrinter::printRegName(raw_ostream &OS, MCRegister Reg) const {
void SystemZAsmPrinter::emitAttributes(Module &M) {
if (M.getModuleFlag("s390x-visible-vector-ABI")) {
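// Record the vector ABI in GNU attribute 8: 2 with the vector facility,
// 1 without it.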
bool HasVectorFeature =
- TM.getMCSubtargetInfo()->getFeatureBits()[SystemZ::FeatureVector];
+ TM.getMCSubtargetInfo()->hasFeature(SystemZ::FeatureVector);
OutStreamer->emitGNUAttribute(8, HasVectorFeature ? 2 : 1);
}
}
bool is64BitMode() const {
// FIXME: Can tablegen auto-generate this?
- return getSTI().getFeatureBits()[X86::Is64Bit];
+ return getSTI().hasFeature(X86::Is64Bit);
}
bool is32BitMode() const {
// FIXME: Can tablegen auto-generate this?
- return getSTI().getFeatureBits()[X86::Is32Bit];
+ return getSTI().hasFeature(X86::Is32Bit);
}
bool is16BitMode() const {
// FIXME: Can tablegen auto-generate this?
- return getSTI().getFeatureBits()[X86::Is16Bit];
+ return getSTI().hasFeature(X86::Is16Bit);
}
void SwitchMode(unsigned mode) {
MCSubtargetInfo &STI = copySTI();
void X86AsmParser::emitInstruction(MCInst &Inst, OperandVector &Operands,
MCStreamer &Out) {
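// CFI hardening must transform the instruction before it is emitted, while
// load hardening emits its mitigation after the instruction.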
if (LVIInlineAsmHardening &&
- getSTI().getFeatureBits()[X86::FeatureLVIControlFlowIntegrity])
+ getSTI().hasFeature(X86::FeatureLVIControlFlowIntegrity))
applyLVICFIMitigation(Inst, Out);
Out.emitInstruction(Inst, getSTI());
if (LVIInlineAsmHardening &&
- getSTI().getFeatureBits()[X86::FeatureLVILoadHardening])
+ getSTI().hasFeature(X86::FeatureLVILoadHardening))
applyLVILoadHardeningMitigation(Inst, Out);
}
// InstrInfo.td as soon as Requires clause is supported properly
// for InstAlias.
if (MI->getOpcode() == X86::CALLpcrel32 &&
- (STI.getFeatureBits()[X86::Is64Bit])) {
+ (STI.hasFeature(X86::Is64Bit))) {
OS << "\tcallq\t";
printPCRelImm(MI, Address, 0, OS);
}
// 0x66 to be interpreted as "data16" by the asm printer.
// Thus we add an adjustment here in order to print the "right" instruction.
else if (MI->getOpcode() == X86::DATA16_PREFIX &&
- STI.getFeatureBits()[X86::Is16Bit]) {
+ STI.hasFeature(X86::Is16Bit)) {
OS << "\tdata32";
}
// Try to print any aliases first.
void X86AsmBackend::relaxInstruction(MCInst &Inst,
const MCSubtargetInfo &STI) const {
// The only relaxations X86 does is from a 1byte pcrel to a 4byte pcrel.
- bool Is16BitMode = STI.getFeatureBits()[X86::Is16Bit];
+ bool Is16BitMode = STI.hasFeature(X86::Is16Bit);
unsigned RelaxedOp = getRelaxedOpcode(Inst, Is16BitMode);
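// getRelaxedOpcode returns the opcode unchanged when no wider form exists,
// which is how we detect that nothing needs relaxing.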
if (RelaxedOp == Inst.getOpcode()) {
static bool isFullyRelaxed(const MCRelaxableFragment &RF) {
auto &Inst = RF.getInst();
auto &STI = *RF.getSubtargetInfo();
- bool Is16BitMode = STI.getFeatureBits()[X86::Is16Bit];
+ bool Is16BitMode = STI.hasFeature(X86::Is16Bit);
return getRelaxedOpcode(Inst, Is16BitMode) == Inst.getOpcode();
}
return 4;
if (!STI.hasFeature(X86::FeatureNOPL) && !STI.hasFeature(X86::Is64Bit))
return 1;
- if (STI.getFeatureBits()[X86::TuningFast7ByteNOP])
+ if (STI.hasFeature(X86::TuningFast7ByteNOP))
return 7;
- if (STI.getFeatureBits()[X86::TuningFast15ByteNOP])
+ if (STI.hasFeature(X86::TuningFast15ByteNOP))
return 15;
- if (STI.getFeatureBits()[X86::TuningFast11ByteNOP])
+ if (STI.hasFeature(X86::TuningFast11ByteNOP))
return 11;
// FIXME: handle 32-bit mode
// 15-bytes is the longest single NOP instruction, but 10-bytes is
};
const char(*Nops)[11] =
- STI->getFeatureBits()[X86::Is16Bit] ? Nops16Bit : Nops32Bit;
+ STI->hasFeature(X86::Is16Bit) ? Nops16Bit : Nops32Bit;
uint64_t MaxNopLength = (uint64_t)getMaximumNopSize(*STI);
// In 16-bit mode, print data16 as data32.
if (MI->getOpcode() == X86::DATA16_PREFIX &&
- STI.getFeatureBits()[X86::Is16Bit]) {
+ STI.hasFeature(X86::Is16Bit)) {
OS << "\tdata32";
} else if (!printAliasInstr(MI, Address, OS) && !printVecCompareInstr(MI, OS))
printInstruction(MI, Address, OS);
: MCDisassembler(STI, Ctx), IsLittleEndian(isLE) {}
bool hasDensity() const {
- return STI.getFeatureBits()[Xtensa::FeatureDensity];
+ return STI.hasFeature(Xtensa::FeatureDensity);
}
DecodeStatus getInstruction(MCInst &Instr, uint64_t &Size,