#define DEBUG_TYPE "frame-info"
-static ArrayRef<MCPhysReg> getAllSGPR128(const GCNSubtarget &ST,
- const MachineFunction &MF) {
- return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
- ST.getMaxNumSGPRs(MF) / 4);
-}
-
-static ArrayRef<MCPhysReg> getAllSGPRs(const GCNSubtarget &ST,
- const MachineFunction &MF) {
- return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(), ST.getMaxNumSGPRs(MF));
-}
-
// Find a scratch register that we can use at the start of the prologue to
// re-align the stack pointer. We avoid using callee-save registers since they
// may appear to be free when this is called from canUseAsPrologue (during
// skip over user SGPRs and may leave unused holes.
unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 3) / 4;
- ArrayRef<MCPhysReg> AllSGPR128s = getAllSGPR128(ST, MF);
+ ArrayRef<MCPhysReg> AllSGPR128s = TRI->getAllSGPR128(MF);
AllSGPR128s = AllSGPR128s.slice(std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded));
// Skip the last N reserved elements because they should have already been
// wave offset to a free SGPR.
Register ScratchWaveOffsetReg;
if (TRI->isSubRegisterEq(ScratchRsrcReg, PreloadedScratchWaveOffsetReg)) {
- ArrayRef<MCPhysReg> AllSGPRs = getAllSGPRs(ST, MF);
+ ArrayRef<MCPhysReg> AllSGPRs = TRI->getAllSGPR32(MF);
unsigned NumPreloaded = MFI->getNumPreloadedSGPRs();
AllSGPRs = AllSGPRs.slice(
std::min(static_cast<unsigned>(AllSGPRs.size()), NumPreloaded));
return false;
}
-static ArrayRef<MCPhysReg> getAllVGPR32(const GCNSubtarget &ST,
- const MachineFunction &MF) {
- return makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), ST.getMaxNumVGPRs(MF));
-}
-
// Find lowest available VGPR and use it as VGPR reserved for SGPR spills.
static bool lowerShiftReservedVGPR(MachineFunction &MF,
const GCNSubtarget &ST) {
MachineFrameInfo &FrameInfo = MF.getFrameInfo();
SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
Register LowestAvailableVGPR, ReservedVGPR;
- ArrayRef<MCPhysReg> AllVGPR32s = getAllVGPR32(ST, MF);
+ ArrayRef<MCPhysReg> AllVGPR32s = ST.getRegisterInfo()->getAllVGPR32(MF);
for (MCPhysReg Reg : AllVGPR32s) {
if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg)) {
LowestAvailableVGPR = Reg;
return false;
}
}
+
+// Return all SGPR128 tuples that satisfy the waves-per-execution-unit
+// requirement of the subtarget (see the matching declaration's doc comment).
+ArrayRef<MCPhysReg>
+SIRegisterInfo::getAllSGPR128(const MachineFunction &MF) const {
+ // getMaxNumSGPRs(MF) is the per-function 32-bit SGPR budget; each
+ // SGPR_128 tuple spans four SGPR32s, hence the division by 4.
+ return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
+ ST.getMaxNumSGPRs(MF) / 4);
+}
+
+// Return all 32-bit SGPRs that satisfy the waves-per-execution-unit
+// requirement of the subtarget: the first getMaxNumSGPRs(MF) entries of
+// the SGPR_32 register class.
+ArrayRef<MCPhysReg>
+SIRegisterInfo::getAllSGPR32(const MachineFunction &MF) const {
+ return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(), ST.getMaxNumSGPRs(MF));
+}
+
+// Return all 32-bit VGPRs that satisfy the waves-per-execution-unit
+// requirement of the subtarget: the first getMaxNumVGPRs(MF) entries of
+// the VGPR_32 register class.
+ArrayRef<MCPhysReg>
+SIRegisterInfo::getAllVGPR32(const MachineFunction &MF) const {
+ return makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), ST.getMaxNumVGPRs(MF));
+}
// \returns \p Reg otherwise.
MCPhysReg get32BitRegister(MCPhysReg Reg) const;
+ /// Return all SGPR128 which satisfy the waves per execution unit requirement
+ /// of the subtarget.
+ ArrayRef<MCPhysReg> getAllSGPR128(const MachineFunction &MF) const;
+
+ /// Return all SGPR32 which satisfy the waves per execution unit requirement
+ /// of the subtarget.
+ ArrayRef<MCPhysReg> getAllSGPR32(const MachineFunction &MF) const;
+
+ /// Return all VGPR32 which satisfy the waves per execution unit requirement
+ /// of the subtarget.
+ ArrayRef<MCPhysReg> getAllVGPR32(const MachineFunction &MF) const;
+
private:
void buildSpillLoadStore(MachineBasicBlock::iterator MI,
unsigned LoadStoreOp,