... in favour of the isPhysical/isVirtual methods.
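For reference, the mechanical pattern repeated throughout the diff below is: registers carried as plain unsigned become llvm::Register, and calls to the static Register::isVirtualRegister / Register::isPhysicalRegister helpers become calls to the isVirtual() / isPhysical() instance methods. A minimal before/after sketch (the helper names here are illustrative, not part of the patch):

#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/Register.h"
using namespace llvm;

// Old style: pass the register around as an unsigned and use the static
// helpers on the Register class.
static bool isVirtualOperandOld(const MachineOperand &MO) {
  unsigned Reg = MO.getReg();
  return Register::isVirtualRegister(Reg);
}

// New style: keep the value as a Register and query it directly.
static bool isVirtualOperandNew(const MachineOperand &MO) {
  Register Reg = MO.getReg();
  return Reg.isVirtual();
}

Everything below is that same substitution applied to function signatures, locals, and container element types.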
unsigned OpNo) const {
if (!N->isMachineOpcode()) {
if (N->getOpcode() == ISD::CopyToReg) {
- unsigned Reg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
- if (Register::isVirtualRegister(Reg)) {
+ Register Reg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
+ if (Reg.isVirtual()) {
MachineRegisterInfo &MRI = CurDAG->getMachineFunction().getRegInfo();
return MRI.getRegClass(Reg);
}
}
for (const MachineOperand &MO : I.operands()) {
- if (Register::isPhysicalRegister(MO.getReg()))
+ if (MO.getReg().isPhysical())
continue;
const TargetRegisterClass *RC =
LinearizedRegion *Parent;
RegionMRT *RMRT;
- void storeLiveOutReg(MachineBasicBlock *MBB, unsigned Reg,
+ void storeLiveOutReg(MachineBasicBlock *MBB, Register Reg,
MachineInstr *DefInstr, const MachineRegisterInfo *MRI,
const TargetRegisterInfo *TRI, PHILinearize &PHIInfo);
- void storeLiveOutRegRegion(RegionMRT *Region, unsigned Reg,
+ void storeLiveOutRegRegion(RegionMRT *Region, Register Reg,
MachineInstr *DefInstr,
const MachineRegisterInfo *MRI,
const TargetRegisterInfo *TRI,
void replaceLiveOut(unsigned OldReg, unsigned NewReg);
- void replaceRegister(unsigned Register, unsigned NewRegister,
+ void replaceRegister(unsigned Register, class Register NewRegister,
MachineRegisterInfo *MRI, bool ReplaceInside,
bool ReplaceOutside, bool IncludeLoopPHIs);
return Result;
}
-void LinearizedRegion::storeLiveOutReg(MachineBasicBlock *MBB, unsigned Reg,
+void LinearizedRegion::storeLiveOutReg(MachineBasicBlock *MBB, Register Reg,
MachineInstr *DefInstr,
const MachineRegisterInfo *MRI,
const TargetRegisterInfo *TRI,
PHILinearize &PHIInfo) {
- if (Register::isVirtualRegister(Reg)) {
+ if (Reg.isVirtual()) {
LLVM_DEBUG(dbgs() << "Considering Register: " << printReg(Reg, TRI)
<< "\n");
// If this is a source register to a PHI we are chaining, it
}
}
-void LinearizedRegion::storeLiveOutRegRegion(RegionMRT *Region, unsigned Reg,
+void LinearizedRegion::storeLiveOutRegRegion(RegionMRT *Region, Register Reg,
MachineInstr *DefInstr,
const MachineRegisterInfo *MRI,
const TargetRegisterInfo *TRI,
PHILinearize &PHIInfo) {
- if (Register::isVirtualRegister(Reg)) {
+ if (Reg.isVirtual()) {
LLVM_DEBUG(dbgs() << "Considering Register: " << printReg(Reg, TRI)
<< "\n");
for (auto &UI : MRI->use_operands(Reg)) {
}
}
-void LinearizedRegion::replaceRegister(unsigned Register, unsigned NewRegister,
+void LinearizedRegion::replaceRegister(unsigned Register,
+ class Register NewRegister,
MachineRegisterInfo *MRI,
bool ReplaceInside, bool ReplaceOutside,
bool IncludeLoopPHI) {
(IncludeLoopPHI && IsLoopPHI);
if (ShouldReplace) {
- if (Register::isPhysicalRegister(NewRegister)) {
+ if (NewRegister.isPhysical()) {
LLVM_DEBUG(dbgs() << "Trying to substitute physical register: "
<< printReg(NewRegister, MRI->getTargetRegisterInfo())
<< "\n");
for (auto &RI : II.uses()) {
if (RI.isReg()) {
Register Reg = RI.getReg();
- if (Register::isVirtualRegister(Reg)) {
+ if (Reg.isVirtual()) {
if (hasNoDef(Reg, MRI))
continue;
if (!MRI->hasOneDef(Reg)) {
void createEntryPHIs(LinearizedRegion *CurrentRegion);
void resolvePHIInfos(MachineBasicBlock *FunctionEntry);
- void replaceRegisterWith(unsigned Register, unsigned NewRegister);
+ void replaceRegisterWith(unsigned Register, class Register NewRegister);
MachineBasicBlock *createIfRegion(MachineBasicBlock *MergeBB,
MachineBasicBlock *CodeBB,
PHIInfo.clear();
}
-void AMDGPUMachineCFGStructurizer::replaceRegisterWith(unsigned Register,
- unsigned NewRegister) {
+void AMDGPUMachineCFGStructurizer::replaceRegisterWith(
+ unsigned Register, class Register NewRegister) {
assert(Register != NewRegister && "Cannot replace a reg with itself");
for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(Register),
I != E;) {
MachineOperand &O = *I;
++I;
- if (Register::isPhysicalRegister(NewRegister)) {
+ if (NewRegister.isPhysical()) {
LLVM_DEBUG(dbgs() << "Trying to substitute physical register: "
<< printReg(NewRegister, MRI->getTargetRegisterInfo())
<< "\n");
for (unsigned I = 0; I < Info->VAddrDwords; ++I) {
const MachineOperand &Op = MI.getOperand(VAddr0Idx + I);
Register Reg = Op.getReg();
- if (Register::isPhysicalRegister(Reg) || !VRM->isAssignedReg(Reg))
+ if (Reg.isPhysical() || !VRM->isAssignedReg(Reg))
return NSA_Status::FIXED;
Register PhysReg = VRM->getPhys(Reg);
public:
OperandMask(unsigned r, unsigned s, unsigned m)
: Reg(r), SubReg(s), Mask(m) {}
- unsigned Reg;
+ Register Reg;
unsigned SubReg;
unsigned Mask;
};
class Candidate {
public:
- Candidate(MachineInstr *mi, unsigned reg, unsigned subreg,
+ Candidate(MachineInstr *mi, Register reg, unsigned subreg,
unsigned freebanks, unsigned weight)
: MI(mi), Reg(reg), SubReg(subreg), FreeBanks(freebanks),
Weight(weight) {}
#endif
MachineInstr *MI;
- unsigned Reg;
+ Register Reg;
unsigned SubReg;
unsigned FreeBanks;
unsigned Weight;
const MCPhysReg *CSRegs;
// Returns bank for a phys reg.
- unsigned getPhysRegBank(unsigned Reg, unsigned SubReg) const;
+ unsigned getPhysRegBank(Register Reg, unsigned SubReg) const;
// Return a bit set for each register bank used. 4 banks for VGPRs and
// 8 banks for SGPRs.
// Registers already processed and recorded in RegsUsed are excluded.
// If Bank is not -1 assume Reg:SubReg to belong to that Bank.
- uint32_t getRegBankMask(unsigned Reg, unsigned SubReg, int Bank);
+ uint32_t getRegBankMask(Register Reg, unsigned SubReg, int Bank);
// Analyze one instruction returning the number of stalls and a mask of the
// banks used by all operands.
// If Reg and Bank are provided, assume all uses of Reg will be replaced with
// a register chosen from Bank.
std::pair<unsigned, unsigned> analyzeInst(const MachineInstr &MI,
- unsigned Reg = AMDGPU::NoRegister,
+ Register Reg = Register(),
unsigned SubReg = 0, int Bank = -1);
// Return true if register is regular VGPR or SGPR or their tuples.
// Returns false for special registers like m0, vcc etc.
- bool isReassignable(unsigned Reg) const;
+ bool isReassignable(Register Reg) const;
// Check if registers' defs are old and may be pre-loaded.
// Returns 0 if both registers are old enough, 1 or 2 if one or both
// registers will not likely be pre-loaded.
unsigned getOperandGatherWeight(const MachineInstr& MI,
- unsigned Reg1,
- unsigned Reg2,
+ Register Reg1,
+ Register Reg2,
unsigned StallCycles) const;
// Find all bank bits in UsedBanks where Mask can be relocated to.
// Bank is relative to the register and not its subregister component.
// Returns 0 if a register is not reassignable.
- unsigned getFreeBanks(unsigned Reg, unsigned SubReg, unsigned Mask,
+ unsigned getFreeBanks(Register Reg, unsigned SubReg, unsigned Mask,
unsigned UsedBanks) const;
// Add candidate instruction to the work list.
unsigned collectCandidates(MachineFunction &MF, bool Collect = true);
// Remove all candidates that read specified register.
- void removeCandidates(unsigned Reg);
+ void removeCandidates(Register Reg);
// Compute stalls within the uses of SrcReg replaced by a register from
// Bank. If Bank is -1 does not perform substitution. If Collect is set
// candidates are collected and added to work list.
- unsigned computeStallCycles(unsigned SrcReg,
- unsigned Reg = AMDGPU::NoRegister,
+ unsigned computeStallCycles(Register SrcReg,
+ Register Reg = Register(),
unsigned SubReg = 0, int Bank = -1,
bool Collect = false);
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
public:
- Printable printReg(unsigned Reg, unsigned SubReg = 0) const {
+ Printable printReg(Register Reg, unsigned SubReg = 0) const {
return Printable([Reg, SubReg, this](raw_ostream &OS) {
- if (Register::isPhysicalRegister(Reg)) {
+ if (Reg.isPhysical()) {
OS << llvm::printReg(Reg, TRI);
return;
}
char &llvm::GCNRegBankReassignID = GCNRegBankReassign::ID;
-unsigned GCNRegBankReassign::getPhysRegBank(unsigned Reg,
+unsigned GCNRegBankReassign::getPhysRegBank(Register Reg,
unsigned SubReg) const {
- assert(Register::isPhysicalRegister(Reg));
+ assert(Reg.isPhysical());
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
unsigned Size = TRI->getRegSizeInBits(*RC);
}
if (TRI->hasVGPRs(RC)) {
- Reg -= AMDGPU::VGPR0;
- return Reg % NUM_VGPR_BANKS;
+ unsigned RegNo = Reg - AMDGPU::VGPR0;
+ return RegNo % NUM_VGPR_BANKS;
}
- Reg = TRI->getEncodingValue(Reg) / 2;
- return Reg % NUM_SGPR_BANKS + SGPR_BANK_OFFSET;
+ unsigned RegNo = TRI->getEncodingValue(Reg) / 2;
+ return RegNo % NUM_SGPR_BANKS + SGPR_BANK_OFFSET;
}
-uint32_t GCNRegBankReassign::getRegBankMask(unsigned Reg, unsigned SubReg,
+uint32_t GCNRegBankReassign::getRegBankMask(Register Reg, unsigned SubReg,
int Bank) {
- if (Register::isVirtualRegister(Reg)) {
+ if (Reg.isVirtual()) {
if (!VRM->isAssignedReg(Reg))
return 0;
if (TRI->hasVGPRs(RC)) {
// VGPRs have 4 banks assigned in a round-robin fashion.
- Reg -= AMDGPU::VGPR0;
+ unsigned RegNo = Reg - AMDGPU::VGPR0;
uint32_t Mask = maskTrailingOnes<uint32_t>(Size);
unsigned Used = 0;
// Bitmask lacks an extract method
for (unsigned I = 0; I < Size; ++I)
- if (RegsUsed.test(Reg + I))
+ if (RegsUsed.test(RegNo + I))
Used |= 1 << I;
- RegsUsed.set(Reg, Reg + Size);
+ RegsUsed.set(RegNo, RegNo + Size);
Mask &= ~Used;
- Mask <<= (Bank == -1) ? Reg % NUM_VGPR_BANKS : uint32_t(Bank);
+ Mask <<= (Bank == -1) ? RegNo % NUM_VGPR_BANKS : uint32_t(Bank);
return (Mask | (Mask >> NUM_VGPR_BANKS)) & VGPR_BANK_MASK;
}
// SGPRs have 8 banks holding 2 consecutive registers each.
- Reg = TRI->getEncodingValue(Reg) / 2;
+ unsigned RegNo = TRI->getEncodingValue(Reg) / 2;
unsigned StartBit = AMDGPU::VGPR_32RegClass.getNumRegs();
- if (Reg + StartBit >= RegsUsed.size())
+ if (RegNo + StartBit >= RegsUsed.size())
return 0;
if (Size > 1)
unsigned Mask = (1 << Size) - 1;
unsigned Used = 0;
for (unsigned I = 0; I < Size; ++I)
- if (RegsUsed.test(StartBit + Reg + I))
+ if (RegsUsed.test(StartBit + RegNo + I))
Used |= 1 << I;
- RegsUsed.set(StartBit + Reg, StartBit + Reg + Size);
+ RegsUsed.set(StartBit + RegNo, StartBit + RegNo + Size);
Mask &= ~Used;
- Mask <<= (Bank == -1) ? Reg % NUM_SGPR_BANKS
+ Mask <<= (Bank == -1) ? RegNo % NUM_SGPR_BANKS
: unsigned(Bank - SGPR_BANK_OFFSET);
Mask = (Mask | (Mask >> NUM_SGPR_BANKS)) & SGPR_BANK_SHIFTED_MASK;
// Reserve 4 bank ids for VGPRs.
}
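An aside on the bank arithmetic in getPhysRegBank and getRegBankMask above: VGPR banks are assigned round-robin by register number across 4 banks, and SGPRs are banked in pairs of their encoding value across 8 banks placed after the 4 VGPR bank ids. A small standalone sketch of that mapping, assuming NUM_VGPR_BANKS == 4, NUM_SGPR_BANKS == 8 and SGPR_BANK_OFFSET == NUM_VGPR_BANKS as used in this file:

// Illustrative only; mirrors the modulo arithmetic in the hunks above.
constexpr unsigned NUM_VGPR_BANKS = 4;
constexpr unsigned NUM_SGPR_BANKS = 8;
constexpr unsigned SGPR_BANK_OFFSET = NUM_VGPR_BANKS;

// VGPRn lands in bank n % 4, e.g. VGPR5 -> bank 1.
unsigned vgprBank(unsigned VGPRNo) { return VGPRNo % NUM_VGPR_BANKS; }

// SGPRs are banked in pairs of their encoding value, offset past the
// VGPR bank ids, e.g. encoding 10 -> (10 / 2) % 8 + 4 = bank 9.
unsigned sgprBank(unsigned EncodingValue) {
  return (EncodingValue / 2) % NUM_SGPR_BANKS + SGPR_BANK_OFFSET;
}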
std::pair<unsigned, unsigned>
-GCNRegBankReassign::analyzeInst(const MachineInstr &MI, unsigned Reg,
+GCNRegBankReassign::analyzeInst(const MachineInstr &MI, Register Reg,
unsigned SubReg, int Bank) {
unsigned StallCycles = 0;
unsigned UsedBanks = 0;
}
unsigned GCNRegBankReassign::getOperandGatherWeight(const MachineInstr& MI,
- unsigned Reg1,
- unsigned Reg2,
+ Register Reg1,
+ Register Reg2,
unsigned StallCycles) const
{
unsigned Defs = 0;
return countPopulation(Defs);
}
-bool GCNRegBankReassign::isReassignable(unsigned Reg) const {
- if (Register::isPhysicalRegister(Reg) || !VRM->isAssignedReg(Reg))
+bool GCNRegBankReassign::isReassignable(Register Reg) const {
+ if (Reg.isPhysical() || !VRM->isAssignedReg(Reg))
return false;
const MachineInstr *Def = MRI->getUniqueVRegDef(Reg);
return FreeBanks;
}
-unsigned GCNRegBankReassign::getFreeBanks(unsigned Reg,
+unsigned GCNRegBankReassign::getFreeBanks(Register Reg,
unsigned SubReg,
unsigned Mask,
unsigned UsedBanks) const {
if (!(OperandMasks[I].Mask & OperandMasks[J].Mask))
continue;
- unsigned Reg1 = OperandMasks[I].Reg;
- unsigned Reg2 = OperandMasks[J].Reg;
+ Register Reg1 = OperandMasks[I].Reg;
+ Register Reg2 = OperandMasks[J].Reg;
unsigned SubReg1 = OperandMasks[I].SubReg;
unsigned SubReg2 = OperandMasks[J].SubReg;
unsigned Mask1 = OperandMasks[I].Mask;
}
}
-unsigned GCNRegBankReassign::computeStallCycles(unsigned SrcReg, unsigned Reg,
+unsigned GCNRegBankReassign::computeStallCycles(Register SrcReg, Register Reg,
unsigned SubReg, int Bank,
bool Collect) {
unsigned TotalStallCycles = 0;
unsigned MaxReg = MaxNumRegs + (Bank < NUM_VGPR_BANKS ? AMDGPU::VGPR0
: AMDGPU::SGPR0);
- for (unsigned Reg : RC->getRegisters()) {
+ for (Register Reg : RC->getRegisters()) {
// Check occupancy limit.
if (TRI->isSubRegisterEq(Reg, MaxReg))
break;
LRM->unassign(LI);
while (!BankStalls.empty()) {
BankStall BS = BankStalls.pop_back_val();
- unsigned Reg = scavengeReg(LI, BS.Bank, C.SubReg);
+ Register Reg = scavengeReg(LI, BS.Bank, C.SubReg);
if (Reg == AMDGPU::NoRegister) {
LLVM_DEBUG(dbgs() << "No free registers in bank " << printBank(BS.Bank)
<< '\n');
return TotalStallCycles;
}
-void GCNRegBankReassign::removeCandidates(unsigned Reg) {
+void GCNRegBankReassign::removeCandidates(Register Reg) {
Candidates.remove_if([Reg, this](const Candidate& C) {
return C.MI->readsRegister(Reg, TRI);
});
///////////////////////////////////////////////////////////////////////////////
// GCNRegPressure
-unsigned GCNRegPressure::getRegKind(unsigned Reg,
+unsigned GCNRegPressure::getRegKind(Register Reg,
const MachineRegisterInfo &MRI) {
- assert(Register::isVirtualRegister(Reg));
+ assert(Reg.isVirtual());
const auto RC = MRI.getRegClass(Reg);
auto STI = static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
return STI->isSGPRClass(RC) ?
static LaneBitmask getDefRegMask(const MachineOperand &MO,
const MachineRegisterInfo &MRI) {
- assert(MO.isDef() && MO.isReg() && Register::isVirtualRegister(MO.getReg()));
+ assert(MO.isDef() && MO.isReg() && MO.getReg().isVirtual());
// We don't rely on read-undef flag because in case of tentative schedule
// tracking it isn't set correctly yet. This works correctly however since
static LaneBitmask getUsedRegMask(const MachineOperand &MO,
const MachineRegisterInfo &MRI,
const LiveIntervals &LIS) {
- assert(MO.isUse() && MO.isReg() && Register::isVirtualRegister(MO.getReg()));
+ assert(MO.isUse() && MO.isReg() && MO.getReg().isVirtual());
if (auto SubReg = MO.getSubReg())
return MRI.getTargetRegisterInfo()->getSubRegIndexLaneMask(SubReg);
const MachineRegisterInfo &MRI) {
SmallVector<RegisterMaskPair, 8> Res;
for (const auto &MO : MI.operands()) {
- if (!MO.isReg() || !Register::isVirtualRegister(MO.getReg()))
+ if (!MO.isReg() || !MO.getReg().isVirtual())
continue;
if (!MO.isUse() || !MO.readsReg())
continue;
MaxPressure = max(AtMIPressure, MaxPressure);
for (const auto &MO : MI.operands()) {
- if (!MO.isReg() || !MO.isDef() ||
- !Register::isVirtualRegister(MO.getReg()) || MO.isDead())
+ if (!MO.isReg() || !MO.isDef() || !MO.getReg().isVirtual() || MO.isDead())
continue;
auto Reg = MO.getReg();
if (!MO.isReg() || !MO.isDef())
continue;
Register Reg = MO.getReg();
- if (!Register::isVirtualRegister(Reg))
+ if (!Reg.isVirtual())
continue;
auto &LiveMask = LiveRegs[Reg];
auto PrevMask = LiveMask;
private:
unsigned Value[TOTAL_KINDS];
- static unsigned getRegKind(unsigned Reg, const MachineRegisterInfo &MRI);
+ static unsigned getRegKind(Register Reg, const MachineRegisterInfo &MRI);
friend GCNRegPressure max(const GCNRegPressure &P1,
const GCNRegPressure &P2);
case R600::MASK_WRITE: {
Register maskedRegister = MI.getOperand(0).getReg();
- assert(Register::isVirtualRegister(maskedRegister));
+ assert(maskedRegister.isVirtual());
MachineInstr * defInstr = MRI.getVRegDef(maskedRegister);
TII->addFlag(*defInstr, 0, MO_FLAG_MASK);
break;
MachineBasicBlock::iterator MBBI) const {
for (MachineInstr::const_mop_iterator I = MBBI->operands_begin(),
E = MBBI->operands_end(); I != E; ++I) {
- if (I->isReg() && !Register::isVirtualRegister(I->getReg()) && I->isUse() &&
+ if (I->isReg() && !I->getReg().isVirtual() && I->isUse() &&
RI.isPhysRegLiveAcrossClauses(I->getReg()))
return false;
}
for (MachineInstr::const_mop_iterator I = MI.operands_begin(),
E = MI.operands_end();
I != E; ++I) {
- if (!I->isReg() || !I->isUse() || Register::isVirtualRegister(I->getReg()))
+ if (!I->isReg() || !I->isUse() || I->getReg().isVirtual())
continue;
if (R600::R600_LDS_SRC_REGRegClass.contains(I->getReg()))
const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
for (std::pair<unsigned, unsigned> LI : MRI.liveins()) {
- unsigned Reg = LI.first;
- if (Register::isVirtualRegister(Reg) || !IndirectRC->contains(Reg))
+ Register Reg = LI.first;
+ if (Reg.isVirtual() || !IndirectRC->contains(Reg))
continue;
unsigned RegIndex;
unsigned RegEnd;
for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd;
++RegIndex) {
- if (IndirectRC->getRegister(RegIndex) == Reg)
+ if (IndirectRC->getRegister(RegIndex) == (unsigned)Reg)
break;
}
Offset = std::max(Offset, (int)RegIndex);
if (MI->getOpcode() != R600::COPY)
return false;
- return !Register::isVirtualRegister(MI->getOperand(1).getReg());
+ return !MI->getOperand(1).getReg().isVirtual();
}
void R600SchedStrategy::releaseTopNode(SUnit *SU) {
}
-bool R600SchedStrategy::regBelongsToClass(unsigned Reg,
+bool R600SchedStrategy::regBelongsToClass(Register Reg,
const TargetRegisterClass *RC) const {
- if (!Register::isVirtualRegister(Reg)) {
+ if (!Reg.isVirtual()) {
return RC->contains(Reg);
} else {
return MRI->getRegClass(Reg) == RC;
bool VLIW5;
int getInstKind(SUnit *SU);
- bool regBelongsToClass(unsigned Reg, const TargetRegisterClass *RC) const;
+ bool regBelongsToClass(Register Reg, const TargetRegisterClass *RC) const;
AluKind getAluKind(SUnit *SU) const;
void LoadAlu();
unsigned AvailablesAluCount() const;
}
}
-bool R600RegisterInfo::isPhysRegLiveAcrossClauses(unsigned Reg) const {
- assert(!Register::isVirtualRegister(Reg));
+bool R600RegisterInfo::isPhysRegLiveAcrossClauses(Register Reg) const {
+ assert(!Reg.isVirtual());
switch (Reg) {
case R600::OQAP:
// \returns true if \p Reg can be defined in one ALU clause and used in
// another.
- bool isPhysRegLiveAcrossClauses(unsigned Reg) const;
+ bool isPhysRegLiveAcrossClauses(Register Reg) const;
void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
unsigned FIOperandNum,
const SIRegisterInfo *TRI) {
const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
- if (!MI.getOperand(i).isReg() ||
- !Register::isVirtualRegister(MI.getOperand(i).getReg()))
+ if (!MI.getOperand(i).isReg() || !MI.getOperand(i).getReg().isVirtual())
continue;
if (TRI->hasVectorRegisters(MRI.getRegClass(MI.getOperand(i).getReg())))
Register DstReg = Copy.getOperand(0).getReg();
Register SrcReg = Copy.getOperand(1).getReg();
- const TargetRegisterClass *SrcRC = Register::isVirtualRegister(SrcReg)
+ const TargetRegisterClass *SrcRC = SrcReg.isVirtual()
? MRI.getRegClass(SrcReg)
: TRI.getPhysRegClass(SrcReg);
// We don't really care about the subregister here.
// SrcRC = TRI.getSubRegClass(SrcRC, Copy.getOperand(1).getSubReg());
- const TargetRegisterClass *DstRC = Register::isVirtualRegister(DstReg)
+ const TargetRegisterClass *DstRC = DstReg.isVirtual()
? MRI.getRegClass(DstReg)
: TRI.getPhysRegClass(DstReg);
auto &Src = MI.getOperand(1);
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = Src.getReg();
- if (!Register::isVirtualRegister(SrcReg) ||
- !Register::isVirtualRegister(DstReg))
+ if (!SrcReg.isVirtual() || !DstReg.isVirtual())
return false;
for (const auto &MO : MRI.reg_nodbg_operands(DstReg)) {
return false;
// It is illegal to have vreg inputs to a physreg defining reg_sequence.
- if (Register::isPhysicalRegister(CopyUse.getOperand(0).getReg()))
+ if (CopyUse.getOperand(0).getReg().isPhysical())
return false;
const TargetRegisterClass *SrcRC, *DstRC;
const TargetRegisterClass *SrcRC, *DstRC;
std::tie(SrcRC, DstRC) = getCopyRegClasses(MI, *TRI, *MRI);
- if (!Register::isVirtualRegister(DstReg)) {
+ if (!DstReg.isVirtual()) {
// If the destination register is a physical register there isn't
// really much we can do to fix this.
// Some special instructions use M0 as an input. Some even only use
if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI)) {
Register SrcReg = MI.getOperand(1).getReg();
- if (!Register::isVirtualRegister(SrcReg)) {
+ if (!SrcReg.isVirtual()) {
TII->moveToVALU(MI, MDT);
break;
}
// that can't be resolved in later operand folding pass
bool Resolved = false;
for (MachineOperand *MO : {&Src0, &Src1}) {
- if (Register::isVirtualRegister(MO->getReg())) {
+ if (MO->getReg().isVirtual()) {
MachineInstr *DefMI = MRI->getVRegDef(MO->getReg());
if (DefMI && TII->isFoldableCopy(*DefMI)) {
const MachineOperand &Def = DefMI->getOperand(0);
return false;
Register UseReg = OpToFold.getReg();
- if (!Register::isVirtualRegister(UseReg))
+ if (!UseReg.isVirtual())
return false;
if (llvm::find_if(FoldList, [UseMI](const FoldCandidate &FC) {
MachineOperand &Op) {
if (Op.isReg()) {
// If this has a subregister, it obviously is a register source.
- if (Op.getSubReg() != AMDGPU::NoSubRegister ||
- !Register::isVirtualRegister(Op.getReg()))
+ if (Op.getSubReg() != AMDGPU::NoSubRegister || !Op.getReg().isVirtual())
return &Op;
MachineInstr *Def = MRI.getVRegDef(Op.getReg());
for (FoldCandidate &Fold : FoldList) {
assert(!Fold.isReg() || Fold.OpToFold);
- if (Fold.isReg() && Register::isVirtualRegister(Fold.OpToFold->getReg())) {
+ if (Fold.isReg() && Fold.OpToFold->getReg().isVirtual()) {
Register Reg = Fold.OpToFold->getReg();
MachineInstr *DefMI = Fold.OpToFold->getParent();
if (DefMI->readsRegister(AMDGPU::EXEC, TRI) &&
if (!FoldingImm && !OpToFold.isReg())
continue;
- if (OpToFold.isReg() && !Register::isVirtualRegister(OpToFold.getReg()))
+ if (OpToFold.isReg() && !OpToFold.getReg().isVirtual())
continue;
// Prevent folding operands backwards in the function. For example,
// ...
// %vgpr0 = V_MOV_B32_e32 1, implicit %exec
MachineOperand &Dst = MI.getOperand(0);
- if (Dst.isReg() && !Register::isVirtualRegister(Dst.getReg()))
+ if (Dst.isReg() && !Dst.getReg().isVirtual())
continue;
foldInstOperand(MI, OpToFold);
private:
template <typename Callable>
- void forAllLanes(unsigned Reg, LaneBitmask LaneMask, Callable Func) const;
+ void forAllLanes(Register Reg, LaneBitmask LaneMask, Callable Func) const;
bool canBundle(const MachineInstr &MI, RegUse &Defs, RegUse &Uses) const;
bool checkPressure(const MachineInstr &MI, GCNDownwardRPTracker &RPT);
S |= RegState::Kill;
if (MO.isEarlyClobber())
S |= RegState::EarlyClobber;
- if (Register::isPhysicalRegister(MO.getReg()) && MO.isRenamable())
+ if (MO.getReg().isPhysical() && MO.isRenamable())
S |= RegState::Renamable;
return S;
}
template <typename Callable>
-void SIFormMemoryClauses::forAllLanes(unsigned Reg, LaneBitmask LaneMask,
+void SIFormMemoryClauses::forAllLanes(Register Reg, LaneBitmask LaneMask,
Callable Func) const {
- if (LaneMask.all() || Register::isPhysicalRegister(Reg) ||
+ if (LaneMask.all() || Reg.isPhysical() ||
LaneMask == MRI->getMaxLaneMaskForVReg(Reg)) {
Func(0);
return;
if (Conflict == Map.end())
continue;
- if (Register::isPhysicalRegister(Reg))
+ if (Reg.isPhysical())
return false;
LaneBitmask Mask = TRI->getSubRegIndexLaneMask(MO.getSubReg());
if (!Reg)
continue;
- LaneBitmask Mask = Register::isVirtualRegister(Reg)
+ LaneBitmask Mask = Reg.isVirtual()
? TRI->getSubRegIndexLaneMask(MO.getSubReg())
: LaneBitmask::getAll();
RegUse &Map = MO.isDef() ? Defs : Uses;
}
for (auto &&R : Defs) {
- unsigned Reg = R.first;
+ Register Reg = R.first;
Uses.erase(Reg);
- if (Register::isPhysicalRegister(Reg))
+ if (Reg.isPhysical())
continue;
LIS->removeInterval(Reg);
LIS->createAndComputeVirtRegInterval(Reg);
}
for (auto &&R : Uses) {
- unsigned Reg = R.first;
- if (Register::isPhysicalRegister(Reg))
+ Register Reg = R.first;
+ if (Reg.isPhysical())
continue;
LIS->removeInterval(Reg);
LIS->createAndComputeVirtRegInterval(Reg);
// Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
// to try understanding copies to physical registers.
- if (SrcVal.getValueType() == MVT::i1 &&
- Register::isPhysicalRegister(DestReg->getReg())) {
+ if (SrcVal.getValueType() == MVT::i1 && DestReg->getReg().isPhysical()) {
SDLoc SL(Node);
MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
SDValue VReg = DAG.getRegister(
MachineOperand &Op = MI.getOperand(I);
if ((OpInfo[I].RegClass != llvm::AMDGPU::AV_64RegClassID &&
OpInfo[I].RegClass != llvm::AMDGPU::AV_32RegClassID) ||
- !Register::isVirtualRegister(Op.getReg()) ||
- !TRI->isAGPR(MRI, Op.getReg()))
+ !Op.getReg().isVirtual() || !TRI->isAGPR(MRI, Op.getReg()))
continue;
auto *Src = MRI.getUniqueVRegDef(Op.getReg());
if (!Src || !Src->isCopy() ||
// The SGPR spill/restore instructions only work on numbered SGPRs, so we need
// to make sure we are using the correct register class.
- if (Register::isVirtualRegister(SrcReg) && SpillSize == 4) {
+ if (SrcReg.isVirtual() && SpillSize == 4) {
MachineRegisterInfo &MRI = MF->getRegInfo();
MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
}
MRI->hasOneUse(Src0->getReg())) {
Src0->ChangeToImmediate(Def->getOperand(1).getImm());
Src0Inlined = true;
- } else if ((Register::isPhysicalRegister(Src0->getReg()) &&
+ } else if ((Src0->getReg().isPhysical() &&
(ST.getConstantBusLimit(Opc) <= 1 &&
RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg())))) ||
- (Register::isVirtualRegister(Src0->getReg()) &&
+ (Src0->getReg().isVirtual() &&
(ST.getConstantBusLimit(Opc) <= 1 &&
RI.isSGPRClass(MRI->getRegClass(Src0->getReg())))))
return false;
MRI->hasOneUse(Src1->getReg()) &&
commuteInstruction(UseMI)) {
Src0->ChangeToImmediate(Def->getOperand(1).getImm());
- } else if ((Register::isPhysicalRegister(Src1->getReg()) &&
+ } else if ((Src1->getReg().isPhysical() &&
RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) ||
- (Register::isVirtualRegister(Src1->getReg()) &&
+ (Src1->getReg().isVirtual() &&
RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
return false;
// VGPR is okay as Src1 - fallthrough
if (!MO.isUse())
return false;
- if (Register::isVirtualRegister(MO.getReg()))
+ if (MO.getReg().isVirtual())
return RI.isSGPRClass(MRI.getRegClass(MO.getReg()));
// Null is free
static bool isSubRegOf(const SIRegisterInfo &TRI,
const MachineOperand &SuperVec,
const MachineOperand &SubReg) {
- if (Register::isPhysicalRegister(SubReg.getReg()))
+ if (SubReg.getReg().isPhysical())
return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg());
return SubReg.getSubReg() != AMDGPU::NoSubRegister &&
continue;
Register Reg = Op.getReg();
- if (!Register::isVirtualRegister(Reg) && !RC->contains(Reg)) {
+ if (!Reg.isVirtual() && !RC->contains(Reg)) {
ErrInfo = "inlineasm operand has incorrect register class.";
return false;
}
if (RegClass != -1) {
Register Reg = MI.getOperand(i).getReg();
- if (Reg == AMDGPU::NoRegister || Register::isVirtualRegister(Reg))
+ if (Reg == AMDGPU::NoRegister || Reg.isVirtual())
continue;
const TargetRegisterClass *RC = RI.getRegClass(RegClass);
ErrInfo =
"Dst register should be tied to implicit use of preserved register";
return false;
- } else if (Register::isPhysicalRegister(TiedMO.getReg()) &&
+ } else if (TiedMO.getReg().isPhysical() &&
Dst.getReg() != TiedMO.getReg()) {
ErrInfo = "Dst register should use same physical register as preserved";
return false;
Desc.OpInfo[OpNo].RegClass == -1) {
Register Reg = MI.getOperand(OpNo).getReg();
- if (Register::isVirtualRegister(Reg))
+ if (Reg.isVirtual())
return MRI.getRegClass(Reg);
return RI.getPhysRegClass(Reg);
}
return false;
Register Reg = MO.getReg();
- const TargetRegisterClass *RC = Register::isVirtualRegister(Reg)
- ? MRI.getRegClass(Reg)
- : RI.getPhysRegClass(Reg);
+ const TargetRegisterClass *RC =
+ Reg.isVirtual() ? MRI.getRegClass(Reg) : RI.getPhysRegClass(Reg);
const TargetRegisterClass *DRC = RI.getRegClass(OpInfo.RegClass);
if (MO.getSubReg()) {
if (MI.getOpcode() == AMDGPU::PHI) {
const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
- if (!MI.getOperand(i).isReg() ||
- !Register::isVirtualRegister(MI.getOperand(i).getReg()))
+ if (!MI.getOperand(i).isReg() || !MI.getOperand(i).getReg().isVirtual())
continue;
const TargetRegisterClass *OpRC =
MRI.getRegClass(MI.getOperand(i).getReg());
// Update all the operands so they have the same type.
for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
MachineOperand &Op = MI.getOperand(I);
- if (!Op.isReg() || !Register::isVirtualRegister(Op.getReg()))
+ if (!Op.isReg() || !Op.getReg().isVirtual())
continue;
// MI is a PHI instruction.
// subregister index types e.g. sub0_sub1 + sub2 + sub3
for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
MachineOperand &Op = MI.getOperand(I);
- if (!Op.isReg() || !Register::isVirtualRegister(Op.getReg()))
+ if (!Op.isReg() || !Op.getReg().isVirtual())
continue;
const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg());
unsigned NewDstReg = AMDGPU::NoRegister;
if (HasDst) {
Register DstReg = Inst.getOperand(0).getReg();
- if (Register::isPhysicalRegister(DstReg))
+ if (DstReg.isPhysical())
continue;
// Update the destination register class.
if (!NewDstRC)
continue;
- if (Inst.isCopy() &&
- Register::isVirtualRegister(Inst.getOperand(1).getReg()) &&
+ if (Inst.isCopy() && Inst.getOperand(1).getReg().isVirtual() &&
NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) {
// Instead of creating a copy where src and dst are the same register
// class, we just replace all uses of dst with src. These kinds of
MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
MachineRegisterInfo &MRI) {
assert(MRI.isSSA());
- if (!Register::isVirtualRegister(P.Reg))
+ if (!P.Reg.isVirtual())
return nullptr;
auto RSR = P;
case AMDGPU::COPY:
case AMDGPU::V_MOV_B32_e32: {
auto &Op1 = MI->getOperand(1);
- if (Op1.isReg() && Register::isVirtualRegister(Op1.getReg())) {
+ if (Op1.isReg() && Op1.getReg().isVirtual()) {
if (Op1.isUndef())
return nullptr;
RSR = getRegSubRegPair(Op1);
return false;
// TODO: We should be able to merge physical reg addresses.
- if (Register::isPhysicalRegister(AddrOp->getReg()))
+ if (AddrOp->getReg().isPhysical())
return false;
// If an address has only one use then there will be no other
if (Op.isReg()) {
if (Op.isDef())
RegDefs.insert(Op.getReg());
- else if (Op.readsReg() && Register::isPhysicalRegister(Op.getReg()))
+ else if (Op.readsReg() && Op.getReg().isPhysical())
PhysRegUses.insert(Op.getReg());
}
}
// be moved for merging, then we need to move the def-instruction as well.
// This can only happen for physical registers such as M0; virtual
// registers are in SSA form.
- if (Use.isReg() &&
- ((Use.readsReg() && RegDefs.count(Use.getReg())) ||
- (Use.isDef() && RegDefs.count(Use.getReg())) ||
- (Use.isDef() && Register::isPhysicalRegister(Use.getReg()) &&
- PhysRegUses.count(Use.getReg())))) {
+ if (Use.isReg() && ((Use.readsReg() && RegDefs.count(Use.getReg())) ||
+ (Use.isDef() && RegDefs.count(Use.getReg())) ||
+ (Use.isDef() && Use.getReg().isPhysical() &&
+ PhysRegUses.count(Use.getReg())))) {
Insts.push_back(&MI);
addDefsUsesToList(MI, RegDefs, PhysRegUses);
return true;
void SILowerControlFlow::findMaskOperands(MachineInstr &MI, unsigned OpNo,
SmallVectorImpl<MachineOperand> &Src) const {
MachineOperand &Op = MI.getOperand(OpNo);
- if (!Op.isReg() || !Register::isVirtualRegister(Op.getReg())) {
+ if (!Op.isReg() || !Op.getReg().isVirtual()) {
Src.push_back(Op);
return;
}
for (const auto &SrcOp : Def->explicit_operands())
if (SrcOp.isReg() && SrcOp.isUse() &&
- (Register::isVirtualRegister(SrcOp.getReg()) || SrcOp.getReg() == Exec))
+ (SrcOp.getReg().isVirtual() || SrcOp.getReg() == Exec))
Src.push_back(SrcOp);
}
void lowerCopiesFromI1();
void lowerPhis();
void lowerCopiesToI1();
- bool isConstantLaneMask(unsigned Reg, bool &Val) const;
+ bool isConstantLaneMask(Register Reg, bool &Val) const;
void buildMergeLaneMasks(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, const DebugLoc &DL,
unsigned DstReg, unsigned PrevReg, unsigned CurReg);
MachineBasicBlock::iterator
getSaluInsertionAtEnd(MachineBasicBlock &MBB) const;
- bool isVreg1(unsigned Reg) const {
- return Register::isVirtualRegister(Reg) &&
- MRI->getRegClass(Reg) == &AMDGPU::VReg_1RegClass;
+ bool isVreg1(Register Reg) const {
+ return Reg.isVirtual() && MRI->getRegClass(Reg) == &AMDGPU::VReg_1RegClass;
}
bool isLaneMaskReg(unsigned Reg) const {
Register SrcReg = MI.getOperand(1).getReg();
assert(!MI.getOperand(1).getSubReg());
- if (!Register::isVirtualRegister(SrcReg) ||
- (!isLaneMaskReg(SrcReg) && !isVreg1(SrcReg))) {
+ if (!SrcReg.isVirtual() || (!isLaneMaskReg(SrcReg) && !isVreg1(SrcReg))) {
assert(TII->getRegisterInfo().getRegSizeInBits(SrcReg, *MRI) == 32);
unsigned TmpReg = createLaneMaskReg(*MF);
BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_CMP_NE_U32_e64), TmpReg)
}
}
-bool SILowerI1Copies::isConstantLaneMask(unsigned Reg, bool &Val) const {
+bool SILowerI1Copies::isConstantLaneMask(Register Reg, bool &Val) const {
const MachineInstr *MI;
for (;;) {
MI = MRI->getUniqueVRegDef(Reg);
break;
Reg = MI->getOperand(1).getReg();
- if (!Register::isVirtualRegister(Reg))
+ if (!Reg.isVirtual())
return false;
if (!isLaneMaskReg(Reg))
return false;
// Comparing to LiveInRegs is not sufficient to differentiate 4 vs 5, 7
// The use of findDefBetween removes the case 4.
for (const auto &RegMaskPair : RPTracker.getPressure().LiveOutRegs) {
- unsigned Reg = RegMaskPair.RegUnit;
- if (Register::isVirtualRegister(Reg) &&
+ Register Reg = RegMaskPair.RegUnit;
+ if (Reg.isVirtual() &&
isDefBetween(Reg, LIS->getInstructionIndex(*BeginBlock).getRegSlot(),
LIS->getInstructionIndex(*EndBlock).getRegSlot(), MRI,
LIS)) {
// Tracking of currently alive registers to determine VGPR Usage.
void SIScheduleBlockScheduler::addLiveRegs(std::set<unsigned> &Regs) {
- for (unsigned Reg : Regs) {
+ for (Register Reg : Regs) {
// For now only track virtual registers.
- if (!Register::isVirtualRegister(Reg))
+ if (!Reg.isVirtual())
continue;
// If not already in the live set, then add it.
(void) LiveRegs.insert(Reg);
std::vector<int> DiffSetPressure;
DiffSetPressure.assign(DAG->getTRI()->getNumRegPressureSets(), 0);
- for (unsigned Reg : InRegs) {
+ for (Register Reg : InRegs) {
// For now only track virtual registers.
- if (!Register::isVirtualRegister(Reg))
+ if (!Reg.isVirtual())
continue;
if (LiveRegsConsumers[Reg] > 1)
continue;
}
}
- for (unsigned Reg : OutRegs) {
+ for (Register Reg : OutRegs) {
// For now only track virtual registers.
- if (!Register::isVirtualRegister(Reg))
+ if (!Reg.isVirtual())
continue;
PSetIterator PSetI = DAG->getMRI()->getPressureSets(Reg);
for (; PSetI.isValid(); ++PSetI) {
VgprUsage = 0;
SgprUsage = 0;
for (_Iterator RegI = First; RegI != End; ++RegI) {
- unsigned Reg = *RegI;
+ Register Reg = *RegI;
// For now only track virtual registers
- if (!Register::isVirtualRegister(Reg))
+ if (!Reg.isVirtual())
continue;
PSetIterator PSetI = MRI.getPressureSets(Reg);
for (; PSetI.isValid(); ++PSetI) {
// Try to remove the compare. The cmp value should not be used between the
// cmp and s_and_b64 if it is VCC, or should simply be unused if it is any
// other register.
- if ((Register::isVirtualRegister(CmpReg) && MRI.use_nodbg_empty(CmpReg)) ||
+ if ((CmpReg.isVirtual() && MRI.use_nodbg_empty(CmpReg)) ||
(CmpReg == CondReg &&
std::none_of(std::next(Cmp->getIterator()), Andn2->getIterator(),
[&](const MachineInstr &MI) {
Cmp->eraseFromParent();
// Try to remove v_cndmask_b32.
- if (Register::isVirtualRegister(SelReg) && MRI.use_nodbg_empty(SelReg)) {
+ if (SelReg.isVirtual() && MRI.use_nodbg_empty(SelReg)) {
LLVM_DEBUG(dbgs() << "Erasing: " << *Sel << '\n');
LIS->RemoveMachineInstrFromMaps(*Sel);
MachineRegisterInfo &MRI = MF.getRegInfo();
LiveIntervals *LIS = &getAnalysis<LiveIntervals>();
- DenseSet<unsigned> RecalcRegs({AMDGPU::EXEC_LO, AMDGPU::EXEC_HI});
+ DenseSet<Register> RecalcRegs({AMDGPU::EXEC_LO, AMDGPU::EXEC_HI});
unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
bool Changed = false;
if (Changed) {
for (auto Reg : RecalcRegs) {
- if (Register::isVirtualRegister(Reg)) {
+ if (Reg.isVirtual()) {
LIS->removeInterval(Reg);
if (!MRI.reg_empty(Reg))
LIS->createAndComputeVirtRegInterval(Reg);
MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
- if (Register::isPhysicalRegister(Src1->getReg()) ||
- Register::isPhysicalRegister(Dst->getReg()))
+ if (Src1->getReg().isPhysical() || Dst->getReg().isPhysical())
break;
if (Opcode == AMDGPU::V_LSHLREV_B32_e32 ||
MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
- if (Register::isPhysicalRegister(Src1->getReg()) ||
- Register::isPhysicalRegister(Dst->getReg()))
+ if (Src1->getReg().isPhysical() || Dst->getReg().isPhysical())
break;
if (Opcode == AMDGPU::V_LSHLREV_B16_e32 ||
MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
- if (Register::isPhysicalRegister(Src0->getReg()) ||
- Register::isPhysicalRegister(Dst->getReg()))
+ if (Src0->getReg().isPhysical() || Dst->getReg().isPhysical())
break;
return std::make_unique<SDWASrcOperand>(
MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
- if (Register::isPhysicalRegister(ValSrc->getReg()) ||
- Register::isPhysicalRegister(Dst->getReg()))
+ if (ValSrc->getReg().isPhysical() || Dst->getReg().isPhysical())
break;
return std::make_unique<SDWASrcOperand>(
if (!TRI->isVGPR(*MRI, Reg))
return false;
- if (Register::isPhysicalRegister(Reg))
+ if (Reg.isPhysical())
return false;
if (VRM->hasPhys(Reg))
continue;
const Register VirtReg = MO.getReg();
- if (Register::isPhysicalRegister(VirtReg))
+ if (VirtReg.isPhysical())
continue;
if (!VRM->hasPhys(VirtReg))
MachineOperand &Src0 = MI.getOperand(Src0Idx);
if (Src0.isReg()) {
Register Reg = Src0.getReg();
- if (Register::isVirtualRegister(Reg) && MRI.hasOneUse(Reg)) {
+ if (Reg.isVirtual() && MRI.hasOneUse(Reg)) {
MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
if (Def && Def->isMoveImmediate()) {
MachineOperand &MovSrc = Def->getOperand(1);
}
if (NewImm != 0) {
- if (Register::isVirtualRegister(Dest->getReg()) && SrcReg->isReg()) {
+ if (Dest->getReg().isVirtual() && SrcReg->isReg()) {
MRI.setRegAllocationHint(Dest->getReg(), 0, SrcReg->getReg());
MRI.setRegAllocationHint(SrcReg->getReg(), 0, Dest->getReg());
return true;
// This is the same as MachineInstr::readsRegister/modifiesRegister except
// it takes subregs into account.
static bool instAccessReg(iterator_range<MachineInstr::const_mop_iterator> &&R,
- unsigned Reg, unsigned SubReg,
+ Register Reg, unsigned SubReg,
const SIRegisterInfo &TRI) {
for (const MachineOperand &MO : R) {
if (!MO.isReg())
continue;
- if (Register::isPhysicalRegister(Reg) &&
- Register::isPhysicalRegister(MO.getReg())) {
+ if (Reg.isPhysical() && MO.getReg().isPhysical()) {
if (TRI.regsOverlap(Reg, MO.getReg()))
return true;
- } else if (MO.getReg() == Reg && Register::isVirtualRegister(Reg)) {
+ } else if (MO.getReg() == Reg && Reg.isVirtual()) {
LaneBitmask Overlap = TRI.getSubRegIndexLaneMask(SubReg) &
TRI.getSubRegIndexLaneMask(MO.getSubReg());
if (Overlap.any())
}
static TargetInstrInfo::RegSubRegPair
-getSubRegForIndex(unsigned Reg, unsigned Sub, unsigned I,
+getSubRegForIndex(Register Reg, unsigned Sub, unsigned I,
const SIRegisterInfo &TRI, const MachineRegisterInfo &MRI) {
if (TRI.getRegSizeInBits(Reg, MRI) != 32) {
- if (Register::isPhysicalRegister(Reg)) {
+ if (Reg.isPhysical()) {
Reg = TRI.getSubReg(Reg, TRI.getSubRegFromChannel(I));
} else {
Sub = TRI.getSubRegFromChannel(I + TRI.getChannelFromSubReg(Sub));
// optimizations happen because this will confuse them.
// XXX - not exactly a check for post-regalloc run.
MachineOperand &Src = MI.getOperand(1);
- if (Src.isImm() &&
- Register::isPhysicalRegister(MI.getOperand(0).getReg())) {
+ if (Src.isImm() && MI.getOperand(0).getReg().isPhysical()) {
int32_t ReverseImm;
if (isReverseInlineImm(TII, Src, ReverseImm)) {
MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
// FIXME: This could work better if hints worked with subregisters. If
// we have a vector add of a constant, we usually don't get the correct
// allocation due to the subregister usage.
- if (Register::isVirtualRegister(Dest->getReg()) && Src0->isReg()) {
+ if (Dest->getReg().isVirtual() && Src0->isReg()) {
MRI.setRegAllocationHint(Dest->getReg(), 0, Src0->getReg());
MRI.setRegAllocationHint(Src0->getReg(), 0, Dest->getReg());
continue;
const MachineOperand &Dst = MI.getOperand(0);
MachineOperand &Src = MI.getOperand(1);
- if (Src.isImm() && Register::isPhysicalRegister(Dst.getReg())) {
+ if (Src.isImm() && Dst.getReg().isPhysical()) {
int32_t ReverseImm;
if (isKImmOperand(TII, Src))
MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
if (TII->isVOPC(Op32)) {
Register DstReg = MI.getOperand(0).getReg();
- if (Register::isVirtualRegister(DstReg)) {
+ if (DstReg.isVirtual()) {
// VOPC instructions can only write to the VCC register. We can't
// force them to use VCC here, because this is only one register and
// cannot deal with sequences which would require multiple copies of
if (!Src2->isReg())
continue;
Register SReg = Src2->getReg();
- if (Register::isVirtualRegister(SReg)) {
+ if (SReg.isVirtual()) {
MRI.setRegAllocationHint(SReg, 0, VCCReg);
continue;
}
bool Next = false;
if (SDst->getReg() != VCCReg) {
- if (Register::isVirtualRegister(SDst->getReg()))
+ if (SDst->getReg().isVirtual())
MRI.setRegAllocationHint(SDst->getReg(), 0, VCCReg);
Next = true;
}
// All of the instructions with carry outs also have an SGPR input in
// src2.
if (Src2 && Src2->getReg() != VCCReg) {
- if (Register::isVirtualRegister(Src2->getReg()))
+ if (Src2->getReg().isVirtual())
MRI.setRegAllocationHint(Src2->getReg(), 0, VCCReg);
Next = true;
}
// Handle physical registers that we need to track; this is mostly relevant
// for VCC, which can appear as the (implicit) input of a uniform branch,
// e.g. when a loop counter is stored in a VGPR.
- if (!Register::isVirtualRegister(Reg)) {
+ if (!Reg.isVirtual()) {
if (Reg == AMDGPU::EXEC || Reg == AMDGPU::EXEC_LO)
continue;
LowerToCopyInstrs.push_back(&MI);
} else {
Register Reg = Inactive.getReg();
- if (Register::isVirtualRegister(Reg)) {
+ if (Reg.isVirtual()) {
for (MachineInstr &DefMI : MRI->def_instructions(Reg))
markInstruction(DefMI, StateWWM, Worklist);
}
Register Reg = MO.getReg();
- if (!Register::isVirtualRegister(Reg) &&
+ if (!Reg.isVirtual() &&
TRI->hasVectorRegisters(TRI->getPhysRegClass(Reg))) {
Flags = StateWQM;
break;
const Register Reg = MI->getOperand(0).getReg();
if (TRI->isVGPR(*MRI, Reg)) {
- const TargetRegisterClass *regClass = Register::isVirtualRegister(Reg)
- ? MRI->getRegClass(Reg)
- : TRI->getPhysRegClass(Reg);
+ const TargetRegisterClass *regClass =
+ Reg.isVirtual() ? MRI->getRegClass(Reg) : TRI->getPhysRegClass(Reg);
const unsigned MovOp = TII->getMovOpcode(regClass);
MI->setDesc(TII->get(MovOp));