/// Get a list of 'hint' registers that the register allocator should try
/// first when allocating a physical register for the virtual register
/// VirtReg. These registers are effectively moved to the front of the
- /// allocation order.
+ /// allocation order. If true is returned, regalloc will try to only use
+ /// hints to the greatest extent possible even if it means spilling.
///
/// The Order argument is the allocation order for VirtReg's register class
/// as returned from RegisterClassInfo::getOrder(). The hint registers must
/// HintType == 0. Targets that override this function should defer to the
/// default implementation if they have no reason to change the allocation
/// order for VirtReg. There may be target-independent hints.
- virtual void getRegAllocationHints(unsigned VirtReg,
+ virtual bool getRegAllocationHints(unsigned VirtReg,
ArrayRef<MCPhysReg> Order,
SmallVectorImpl<MCPhysReg> &Hints,
const MachineFunction &MF,
const VirtRegMap &VRM,
const RegisterClassInfo &RegClassInfo,
const LiveRegMatrix *Matrix)
- : Pos(0) {
+ : Pos(0), HardHints(false) {
const MachineFunction &MF = VRM.getMachineFunction();
const TargetRegisterInfo *TRI = &VRM.getTargetRegInfo();
Order = RegClassInfo.getOrder(MF.getRegInfo().getRegClass(VirtReg));
- TRI->getRegAllocationHints(VirtReg, Order, Hints, MF, &VRM, Matrix);
+ if (TRI->getRegAllocationHints(VirtReg, Order, Hints, MF, &VRM, Matrix))
+ HardHints = true;
rewind();
DEBUG({
ArrayRef<MCPhysReg> Order;
int Pos;
+ // If HardHints is true, *only* Hints will be returned.
+ bool HardHints;
+
public:
+
/// Create a new AllocationOrder for VirtReg.
/// @param VirtReg Virtual register to allocate for.
/// @param VRM Virtual register map for function.
unsigned next(unsigned Limit = 0) {
if (Pos < 0)
return Hints.end()[Pos++];
+ if (HardHints)
+ return 0;
if (!Limit)
Limit = Order.size();
while (Pos < int(Limit)) {
unsigned nextWithDups(unsigned Limit) {
if (Pos < 0)
return Hints.end()[Pos++];
+ if (HardHints)
+ return 0;
if (Pos < int(Limit))
return Order[Pos++];
return 0;
}
// Compute target-independent register allocator hints to help eliminate copies.
-void
+bool
TargetRegisterInfo::getRegAllocationHints(unsigned VirtReg,
ArrayRef<MCPhysReg> Order,
SmallVectorImpl<MCPhysReg> &Hints,
// Check that Phys is a valid hint in VirtReg's register class.
if (!isPhysicalRegister(Phys))
- return;
+ return false;
if (MRI.isReserved(Phys))
- return;
+ return false;
// Check that Phys is in the allocation order. We shouldn't heed hints
// from VirtReg's register class if they aren't in the allocation order. The
// target probably has a reason for removing the register.
if (!is_contained(Order, Phys))
- return;
+ return false;
// All clear, tell the register allocator to prefer this register.
Hints.push_back(Phys);
+ return false;
}
bool TargetRegisterInfo::canRealignStack(const MachineFunction &MF) const {
}
// Resolve the RegPairEven / RegPairOdd register allocator hints.
-void
+bool
ARMBaseRegisterInfo::getRegAllocationHints(unsigned VirtReg,
ArrayRef<MCPhysReg> Order,
SmallVectorImpl<MCPhysReg> &Hints,
break;
default:
TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF, VRM);
- return;
+ return false;
}
// This register should preferably be even (Odd == 0) or odd (Odd == 1).
// the paired register as the first hint.
unsigned Paired = Hint.second;
if (Paired == 0)
- return;
+ return false;
unsigned PairedPhys = 0;
if (TargetRegisterInfo::isPhysicalRegister(Paired)) {
continue;
Hints.push_back(Reg);
}
+ return false;
}
void
unsigned getRegPressureLimit(const TargetRegisterClass *RC,
MachineFunction &MF) const override;
- void getRegAllocationHints(unsigned VirtReg,
+ bool getRegAllocationHints(unsigned VirtReg,
ArrayRef<MCPhysReg> Order,
SmallVectorImpl<MCPhysReg> &Hints,
const MachineFunction &MF,
#include "SystemZ.h"
#include "SystemZInstrBuilder.h"
#include "SystemZSubtarget.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#define GET_INSTRMAP_INFO
#include "SystemZGenInstrInfo.inc"
+#define DEBUG_TYPE "systemz-II"
+STATISTIC(LOCRMuxJumps, "Number of LOCRMux jump-sequences (lower is better)");
+
// Return a mask with Count low bits set.
static uint64_t allOnes(unsigned int Count) {
return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
MI.setDesc(get(LowOpcode));
else if (DestIsHigh && SrcIsHigh)
MI.setDesc(get(HighOpcode));
+ else
+ LOCRMuxJumps++;
// If we were unable to implement the pseudo with a single instruction, we
// need to convert it back into a branch sequence. This cannot be done here
#include "SystemZInstrInfo.h"
#include "SystemZSubtarget.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
+#include "llvm/CodeGen/VirtRegMap.h"
using namespace llvm;
SystemZRegisterInfo::SystemZRegisterInfo()
: SystemZGenRegisterInfo(SystemZ::R14D) {}
+// Given that MO is a GRX32 operand, return either GR32 or GRH32 if MO
+// somehow belongs in it. Otherwise, return GRX32.
+static const TargetRegisterClass *getRC32(MachineOperand &MO,
+ const VirtRegMap *VRM,
+ const MachineRegisterInfo *MRI) {
+ const TargetRegisterClass *RC = MRI->getRegClass(MO.getReg());
+
+ if (SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
+ MO.getSubReg() == SystemZ::subreg_l32)
+ return &SystemZ::GR32BitRegClass;
+ if (SystemZ::GRH32BitRegClass.hasSubClassEq(RC) ||
+ MO.getSubReg() == SystemZ::subreg_h32)
+ return &SystemZ::GRH32BitRegClass;
+
+ if (VRM && VRM->hasPhys(MO.getReg())) {
+ unsigned PhysReg = VRM->getPhys(MO.getReg());
+ if (SystemZ::GR32BitRegClass.contains(PhysReg))
+ return &SystemZ::GR32BitRegClass;
+ assert (SystemZ::GRH32BitRegClass.contains(PhysReg) &&
+ "Phys reg not in GR32 or GRH32?");
+ return &SystemZ::GRH32BitRegClass;
+ }
+
+ assert (RC == &SystemZ::GRX32BitRegClass);
+ return RC;
+}
+
+bool
+SystemZRegisterInfo::getRegAllocationHints(unsigned VirtReg,
+ ArrayRef<MCPhysReg> Order,
+ SmallVectorImpl<MCPhysReg> &Hints,
+ const MachineFunction &MF,
+ const VirtRegMap *VRM,
+ const LiveRegMatrix *Matrix) const {
+ const MachineRegisterInfo *MRI = &MF.getRegInfo();
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+ if (MRI->getRegClass(VirtReg) == &SystemZ::GRX32BitRegClass) {
+ SmallVector<unsigned, 8> Worklist;
+ SmallSet<unsigned, 4> DoneRegs;
+ Worklist.push_back(VirtReg);
+ while (Worklist.size()) {
+ unsigned Reg = Worklist.pop_back_val();
+ if (!DoneRegs.insert(Reg).second)
+ continue;
+
+ for (auto &Use : MRI->use_instructions(Reg))
+ // For LOCRMux, see if the other operand is already a high or low
+ // register, and in that case give the corresponding hints for
+ // VirtReg. LOCR instructions need both operands in either high or
+ // low parts.
+ if (Use.getOpcode() == SystemZ::LOCRMux) {
+ MachineOperand &TrueMO = Use.getOperand(1);
+ MachineOperand &FalseMO = Use.getOperand(2);
+ const TargetRegisterClass *RC =
+ TRI->getCommonSubClass(getRC32(FalseMO, VRM, MRI),
+ getRC32(TrueMO, VRM, MRI));
+ if (RC && RC != &SystemZ::GRX32BitRegClass) {
+ for (MCPhysReg Reg : Order)
+ if (RC->contains(Reg) && !MRI->isReserved(Reg))
+ Hints.push_back(Reg);
+ // Return true to make these hints the only regs available to
+ // RA. This may mean extra spilling but since the alternative is
+ // a jump sequence expansion of the LOCRMux, it is preferred.
+ return true;
+ }
+
+ // Add the other operand of the LOCRMux to the worklist.
+ unsigned OtherReg =
+ (TrueMO.getReg() == Reg ? FalseMO.getReg() : TrueMO.getReg());
+ if (MRI->getRegClass(OtherReg) == &SystemZ::GRX32BitRegClass)
+ Worklist.push_back(OtherReg);
+ }
+ }
+ }
+
+ return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF,
+ VRM, Matrix);
+}
+
const MCPhysReg *
SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
if (MF->getSubtarget().getTargetLowering()->supportSwiftError() &&
return &SystemZ::ADDR64BitRegClass;
}
+ bool getRegAllocationHints(unsigned VirtReg,
+ ArrayRef<MCPhysReg> Order,
+ SmallVectorImpl<MCPhysReg> &Hints,
+ const MachineFunction &MF,
+ const VirtRegMap *VRM,
+ const LiveRegMatrix *Matrix) const override;
+
// Override TargetRegisterInfo.h.
bool requiresRegisterScavenging(const MachineFunction &MF) const override {
return true;
--- /dev/null
+# RUN: llc -mtriple=s390x-linux-gnu -mcpu=z13 -start-before=greedy %s -o - \
+# RUN: | FileCheck %s
+#
+# Test that regalloc manages (via regalloc hints) to avoid a LOCRMux jump
+# sequence expansion.
+
+--- |
+
+ declare i8* @foo(i8*, i32 signext, i32 signext) local_unnamed_addr
+
+ define i8* @fun(i8* returned) {
+ br label %2
+
+ ; <label>:2: ; preds = %6, %1
+ %3 = zext i16 undef to i32
+ switch i32 %3, label %4 [
+ i32 15, label %6
+ i32 125, label %5
+ ]
+
+ ; <label>:4: ; preds = %2
+ br label %6
+
+ ; <label>:5: ; preds = %2
+ br label %6
+
+ ; <label>:6: ; preds = %5, %4, %2
+ %7 = phi i32 [ 4, %2 ], [ undef, %4 ], [ 10, %5 ]
+ %8 = call i8* @foo(i8* undef, i32 signext undef, i32 signext %7)
+ br label %2
+ }
+
+...
+
+# CHECK: locr
+# CHECK-NOT: risblg
+
+---
+name: fun
+alignment: 2
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr32bit }
+ - { id: 1, class: gr64bit }
+ - { id: 2, class: grx32bit }
+ - { id: 3, class: grx32bit }
+ - { id: 4, class: grx32bit }
+ - { id: 5, class: grx32bit }
+ - { id: 6, class: grx32bit }
+ - { id: 7, class: gr64bit }
+ - { id: 8, class: gr64bit }
+ - { id: 9, class: gr64bit }
+ - { id: 10, class: gr64bit }
+ - { id: 11, class: gr32bit }
+frameInfo:
+ hasCalls: true
+body: |
+ bb.0 (%ir-block.1):
+ %3 = LHIMux 0
+ %2 = LHIMux 4
+ %5 = LHIMux 10
+
+ bb.1 (%ir-block.2):
+ CHIMux %3, 0, implicit-def %cc
+ %0 = LOCRMux undef %0, %5, 14, 6, implicit %cc
+ %0 = LOCRMux %0, %2, 14, 6, implicit killed %cc
+ ADJCALLSTACKDOWN 0, 0
+ %7 = LGFR %0
+ %r3d = LGHI 0
+ %r4d = COPY %7
+ CallBRASL @foo, undef %r2d, killed %r3d, killed %r4d, csr_systemz, implicit-def dead %r14d, implicit-def dead %cc, implicit-def dead %r2d
+ ADJCALLSTACKUP 0, 0
+ J %bb.1
+
+...