Use PackedRegisterRef to store the register information in the graph nodes.
This commit also removes support for virtual registers. That support was never
tested or used; it can be added back if the need arises.
llvm-svn: 284255
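
The gist of the change: a graph node no longer stores a full {register, lane mask}
pair. Lane masks are interned in a per-graph LaneMaskIndex, and the node keeps only
a compact {register, mask index} pair (PackedRegisterRef), which the DataFlowGraph
packs and unpacks on access. Below is a minimal standalone sketch of that packing
scheme, not the actual RDF headers; types and names are deliberately simplified
(LaneBits stands in for llvm::LaneBitmask) and only the interning logic is shown.

  #include <cassert>
  #include <cstdint>
  #include <vector>

  using RegisterId = uint32_t;
  using LaneBits = uint32_t;                 // stand-in for llvm::LaneBitmask

  struct RegisterRef       { RegisterId Reg; LaneBits Mask; };
  struct PackedRegisterRef { RegisterId Reg; uint32_t MaskId; };

  class LaneMaskIndex {
    std::vector<LaneBits> Map;               // interned partial masks
  public:
    uint32_t insert(LaneBits M) {
      if (M == ~LaneBits(0))
        return 0;                            // full mask is implicit: index 0
      for (uint32_t I = 0; I != Map.size(); ++I)
        if (Map[I] == M)
          return I + 1;                      // stored indices are 1-based
      Map.push_back(M);
      return static_cast<uint32_t>(Map.size());
    }
    LaneBits get(uint32_t Idx) const {
      if (Idx == 0)
        return ~LaneBits(0);
      assert(Idx - 1 < Map.size());
      return Map[Idx - 1];
    }
    PackedRegisterRef pack(RegisterRef RR) { return {RR.Reg, insert(RR.Mask)}; }
    RegisterRef unpack(PackedRegisterRef PR) const { return {PR.Reg, get(PR.MaskId)}; }
  };

Since this commit only ever produces full lane masks (see makeRegRef below), every
reference packs to MaskId 0 and the interning table stays empty in practice.
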
RegisterRef OffsetRR;
NodeId OffsetRegRD = 0;
for (NodeAddr<UseNode *> UA : AddAslSN.Addr->members_if(DFG->IsUse, *DFG)) {
- RegisterRef RR = UA.Addr->getRegRef();
+ RegisterRef RR = UA.Addr->getRegRef(*DFG);
if (OffsetReg == RR.Reg) {
OffsetRR = RR;
OffsetRegRD = UA.Addr->getReachingDef();
NodeList &UNodeList) {
for (auto I = UNodeList.rbegin(), E = UNodeList.rend(); I != E; ++I) {
NodeAddr<UseNode *> UN = *I;
- RegisterRef UR = UN.Addr->getRegRef();
+ RegisterRef UR = UN.Addr->getRegRef(*DFG);
NodeSet Visited, Defs;
const auto &ReachingDefs = LV->getAllReachingDefsRec(UR, UN, Visited, Defs);
if (ReachingDefs.size() > 1) {
for (NodeAddr<DefNode *> DA : SA.Addr->members_if(DFG->IsDef, *DFG)) {
DEBUG(dbgs() << "\t\t[DefNode]: " << Print<NodeAddr<DefNode *>>(DA, *DFG)
<< "\n");
- RegisterRef DR = DA.Addr->getRegRef();
+ RegisterRef DR = DA.Addr->getRegRef(*DFG);
auto UseSet = LV->getAllReachedUses(DR, DA);
for (auto UI : UseSet) {
void HexagonOptAddrMode::updateMap(NodeAddr<InstrNode *> IA) {
RegisterSet RRs;
for (NodeAddr<RefNode *> RA : IA.Addr->members(*DFG))
- RRs.insert(RA.Addr->getRegRef());
+ RRs.insert(RA.Addr->getRegRef(*DFG));
bool Common = false;
for (auto &R : RDefMap) {
if (!RRs.count(R.first))
EM.insert(std::make_pair(DstR, SrcR));
};
+ DataFlowGraph &DFG = getDFG();
unsigned Opc = MI->getOpcode();
switch (Opc) {
case Hexagon::A2_combinew: {
const MachineOperand &HiOp = MI->getOperand(1);
const MachineOperand &LoOp = MI->getOperand(2);
assert(DstOp.getSubReg() == 0 && "Unexpected subregister");
- mapRegs({ DstOp.getReg(), Hexagon::subreg_hireg },
- { HiOp.getReg(), HiOp.getSubReg() });
- mapRegs({ DstOp.getReg(), Hexagon::subreg_loreg },
- { LoOp.getReg(), LoOp.getSubReg() });
+ mapRegs(DFG.makeRegRef(DstOp.getReg(), Hexagon::subreg_hireg),
+ DFG.makeRegRef(HiOp.getReg(), HiOp.getSubReg()));
+ mapRegs(DFG.makeRegRef(DstOp.getReg(), Hexagon::subreg_loreg),
+ DFG.makeRegRef(LoOp.getReg(), LoOp.getSubReg()));
return true;
}
case Hexagon::A2_addi: {
case Hexagon::A2_tfr: {
const MachineOperand &DstOp = MI->getOperand(0);
const MachineOperand &SrcOp = MI->getOperand(1);
- mapRegs({ DstOp.getReg(), DstOp.getSubReg() },
- { SrcOp.getReg(), SrcOp.getSubReg() });
+ mapRegs(DFG.makeRegRef(DstOp.getReg(), DstOp.getSubReg()),
+ DFG.makeRegRef(SrcOp.getReg(), SrcOp.getSubReg()));
return true;
}
}
llvm_unreachable("Invalid operand");
};
DenseMap<NodeId,unsigned> OpMap;
- NodeList Refs = IA.Addr->members(getDFG());
+ DataFlowGraph &DFG = getDFG();
+ NodeList Refs = IA.Addr->members(DFG);
for (NodeAddr<RefNode*> RA : Refs)
OpMap.insert(std::make_pair(RA.Id, getOpNum(RA.Addr->getOp())));
for (NodeAddr<RefNode*> RA : Refs) {
unsigned N = OpMap[RA.Id];
if (N < OpNum)
- RA.Addr->setRegRef(&MI->getOperand(N));
+ RA.Addr->setRegRef(&MI->getOperand(N), DFG);
else if (N > OpNum)
- RA.Addr->setRegRef(&MI->getOperand(N-1));
+ RA.Addr->setRegRef(&MI->getOperand(N-1), DFG);
}
}
case TargetOpcode::COPY: {
const MachineOperand &Dst = MI->getOperand(0);
const MachineOperand &Src = MI->getOperand(1);
- RegisterRef DstR = { Dst.getReg(), Dst.getSubReg() };
- RegisterRef SrcR = { Src.getReg(), Src.getSubReg() };
- if (TargetRegisterInfo::isVirtualRegister(DstR.Reg)) {
- if (!TargetRegisterInfo::isVirtualRegister(SrcR.Reg))
- return false;
- MachineRegisterInfo &MRI = DFG.getMF().getRegInfo();
- if (MRI.getRegClass(DstR.Reg) != MRI.getRegClass(SrcR.Reg))
- return false;
- } else if (TargetRegisterInfo::isPhysicalRegister(DstR.Reg)) {
- if (!TargetRegisterInfo::isPhysicalRegister(SrcR.Reg))
- return false;
- const TargetRegisterInfo &TRI = DFG.getTRI();
- if (TRI.getMinimalPhysRegClass(DstR.Reg) !=
- TRI.getMinimalPhysRegClass(SrcR.Reg))
- return false;
- } else {
- // Copy between some unknown objects.
+ RegisterRef DstR = DFG.makeRegRef(Dst.getReg(), Dst.getSubReg());
+ RegisterRef SrcR = DFG.makeRegRef(Src.getReg(), Src.getSubReg());
+ assert(TargetRegisterInfo::isPhysicalRegister(DstR.Reg));
+ assert(TargetRegisterInfo::isPhysicalRegister(SrcR.Reg));
+ const TargetRegisterInfo &TRI = DFG.getTRI();
+ if (TRI.getMinimalPhysRegClass(DstR.Reg) !=
+ TRI.getMinimalPhysRegClass(SrcR.Reg))
return false;
- }
EM.insert(std::make_pair(DstR, SrcR));
return true;
}
- case TargetOpcode::REG_SEQUENCE: {
- const MachineOperand &Dst = MI->getOperand(0);
- RegisterRef DefR = { Dst.getReg(), Dst.getSubReg() };
- SmallVector<TargetInstrInfo::RegSubRegPairAndIdx,2> Inputs;
- const TargetInstrInfo &TII = DFG.getTII();
- if (!TII.getRegSequenceInputs(*MI, 0, Inputs))
- return false;
- for (auto I : Inputs) {
- unsigned S = DFG.getTRI().composeSubRegIndices(DefR.Sub, I.SubIdx);
- RegisterRef DR = { DefR.Reg, S };
- RegisterRef SR = { I.Reg, I.SubReg };
- EM.insert(std::make_pair(DR, SR));
- }
- return true;
- }
+ case TargetOpcode::REG_SEQUENCE:
+ llvm_unreachable("Unexpected REG_SEQUENCE");
}
return false;
}
void CopyPropagation::updateMap(NodeAddr<InstrNode*> IA) {
RegisterSet RRs;
for (NodeAddr<RefNode*> RA : IA.Addr->members(DFG))
- RRs.insert(RA.Addr->getRegRef());
+ RRs.insert(RA.Addr->getRegRef(DFG));
bool Common = false;
for (auto &R : RDefMap) {
if (!RRs.count(R.first))
bool HasLimit = CpLimit.getNumOccurrences() > 0;
#endif
+ auto MinPhysReg = [this] (RegisterRef RR) -> unsigned {
+ const TargetRegisterInfo &TRI = DFG.getTRI();
+ const TargetRegisterClass &RC = *TRI.getMinimalPhysRegClass(RR.Reg);
+ if ((RC.LaneMask & RR.Mask) == RC.LaneMask)
+ return RR.Reg;
+ for (MCSubRegIndexIterator S(RR.Reg, &TRI); S.isValid(); ++S)
+ if (RR.Mask == TRI.getSubRegIndexLaneMask(S.getSubRegIndex()))
+ return S.getSubReg();
+ llvm_unreachable("Should have found a register");
+ return 0;
+ };
+
for (auto C : Copies) {
#ifndef NDEBUG
if (HasLimit && CpCount >= CpLimit)
EqualityMap &EM = FS->second;
for (NodeAddr<DefNode*> DA : SA.Addr->members_if(DFG.IsDef, DFG)) {
- RegisterRef DR = DA.Addr->getRegRef();
+ RegisterRef DR = DA.Addr->getRegRef(DFG);
auto FR = EM.find(DR);
if (FR == EM.end())
continue;
uint16_t F = UA.Addr->getFlags();
if ((F & NodeAttrs::PhiRef) || (F & NodeAttrs::Fixed))
continue;
- if (UA.Addr->getRegRef() != DR)
+ if (UA.Addr->getRegRef(DFG) != DR)
continue;
NodeAddr<InstrNode*> IA = UA.Addr->getOwner(DFG);
<< *NodeAddr<StmtNode*>(IA).Addr->getCode();
}
- Op.setReg(SR.Reg);
- Op.setSubReg(SR.Sub);
+ unsigned NewReg = MinPhysReg(SR);
+ Op.setReg(NewReg);
+ Op.setSubReg(0);
DFG.unlinkUse(UA, false);
if (RDefSR_SA != 0) {
UA.Addr->linkToDef(UA.Id, DFG.addr<DefNode*>(RDefSR_SA));
bool run();
void trace(bool On) { Trace = On; }
bool trace() const { return Trace; }
+ DataFlowGraph &getDFG() { return DFG; }
typedef std::map<RegisterRef, RegisterRef> EqualityMap;
virtual bool interpretAsCopy(const MachineInstr *MI, EqualityMap &EM);
OS << TRI.getName(P.Obj.Reg);
else
OS << '#' << P.Obj.Reg;
- if (P.Obj.Sub != 0) {
- LaneBitmask LM = P.G.getLMI().getLaneMaskForIndex(P.Obj.Sub);
- OS << ":L" << PrintLaneMask(LM);
- }
+ if (P.Obj.Mask != ~LaneBitmask(0))
+ OS << ":" << PrintLaneMask(P.Obj.Mask);
return OS;
}
void printRefHeader(raw_ostream &OS, const NodeAddr<RefNode*> RA,
const DataFlowGraph &G) {
OS << Print<NodeId>(RA.Id, G) << '<'
- << Print<RegisterRef>(RA.Addr->getRegRef(), G) << '>';
+ << Print<RegisterRef>(RA.Addr->getRegRef(G), G) << '>';
if (RA.Addr->getFlags() & NodeAttrs::Fixed)
OS << '!';
}
const Print<DataFlowGraph::DefStack> &P) {
for (auto I = P.Obj.top(), E = P.Obj.bottom(); I != E; ) {
OS << Print<NodeId>(I->Id, P.G)
- << '<' << Print<RegisterRef>(I->Addr->getRegRef(), P.G) << '>';
+ << '<' << Print<RegisterRef>(I->Addr->getRegRef(P.G), P.G) << '>';
I.down();
if (I != E)
OS << ' ';
// Fundamental node manipulator functions.
// Obtain the register reference from a reference node.
-RegisterRef RefNode::getRegRef() const {
+RegisterRef RefNode::getRegRef(const DataFlowGraph &G) const {
assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref);
if (NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef)
- return Ref.RR;
+ return G.unpack(Ref.PR);
assert(Ref.Op != nullptr);
- return { Ref.Op->getReg(), Ref.Op->getSubReg() };
+ return G.makeRegRef(Ref.Op->getReg(), Ref.Op->getSubReg());
}
// Set the register reference in the reference node directly (for references
// in phi nodes).
-void RefNode::setRegRef(RegisterRef RR) {
+void RefNode::setRegRef(RegisterRef RR, DataFlowGraph &G) {
assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref);
assert(NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef);
- Ref.RR = RR;
+ Ref.PR = G.pack(RR);
}
// Set the register reference in the reference node based on a machine
// operand (for references in statement nodes).
-void RefNode::setRegRef(MachineOperand *Op) {
+void RefNode::setRegRef(MachineOperand *Op, DataFlowGraph &G) {
assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref);
assert(!(NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef));
+ (void)G;
Ref.Op = Op;
}
}
-uint32_t RegisterAggr::getLargestSuperReg(uint32_t Reg) const {
- uint32_t SuperReg = Reg;
+RegisterRef RegisterAggr::normalize(RegisterRef RR) const {
+ uint32_t SuperReg = RR.Reg;
while (true) {
MCSuperRegIterator SR(SuperReg, &TRI, false);
if (!SR.isValid())
- return SuperReg;
+ break;
SuperReg = *SR;
}
- llvm_unreachable(nullptr);
-}
-LaneBitmask RegisterAggr::composeMaskForReg(uint32_t Reg, LaneBitmask LM,
- uint32_t SuperR) const {
- uint32_t SubR = TRI.getSubRegIndex(SuperR, Reg);
- const TargetRegisterClass &RC = *TRI.getMinimalPhysRegClass(Reg);
- return TRI.composeSubRegIndexLaneMask(SubR, LM & RC.LaneMask);
-}
-
-void RegisterAggr::setMaskRaw(uint32_t Reg, LaneBitmask LM) {
- uint32_t SuperR = getLargestSuperReg(Reg);
- LaneBitmask SuperM = composeMaskForReg(Reg, LM, SuperR);
- auto F = Masks.find(SuperR);
- if (F == Masks.end())
- Masks.insert({SuperR, SuperM});
- else
- F->second |= SuperM;
-
- // Visit all register units to see if there are any that were created
- // by explicit aliases. Add those that were to the bit vector.
- for (MCRegUnitIterator U(Reg, &TRI); U.isValid(); ++U) {
- MCRegUnitRootIterator R(*U, &TRI);
- ++R;
- if (!R.isValid())
- continue;
- ExpAliasUnits.set(*U);
- CheckUnits = true;
- }
+ uint32_t Sub = TRI.getSubRegIndex(SuperReg, RR.Reg);
+ const TargetRegisterClass &RC = *TRI.getMinimalPhysRegClass(RR.Reg);
+ LaneBitmask SuperMask =
+ TRI.composeSubRegIndexLaneMask(Sub, RR.Mask & RC.LaneMask);
+ return RegisterRef(SuperReg, SuperMask);
}
bool RegisterAggr::hasAliasOf(RegisterRef RR) const {
- uint32_t SuperR = getLargestSuperReg(RR.Reg);
- auto F = Masks.find(SuperR);
+ RegisterRef NR = normalize(RR);
+ auto F = Masks.find(NR.Reg);
if (F != Masks.end()) {
- LaneBitmask M = LMI.getLaneMaskForIndex(RR.Sub);
- if (F->second & composeMaskForReg(RR.Reg, M, SuperR))
+ if (F->second & NR.Mask)
return true;
}
if (CheckUnits) {
}
bool RegisterAggr::hasCoverOf(RegisterRef RR) const {
- uint32_t SuperR = getLargestSuperReg(RR.Reg);
- auto F = Masks.find(SuperR);
+ // Always have a cover for empty lane mask.
+ RegisterRef NR = normalize(RR);
+ if (!NR.Mask)
+ return true;
+ auto F = Masks.find(NR.Reg);
if (F == Masks.end())
return false;
- LaneBitmask M = LMI.getLaneMaskForIndex(RR.Sub);
- LaneBitmask SuperM = composeMaskForReg(RR.Reg, M, SuperR);
- return (SuperM & F->second) == SuperM;
+ return (NR.Mask & F->second) == NR.Mask;
}
RegisterAggr &RegisterAggr::insert(RegisterRef RR) {
- setMaskRaw(RR.Reg, LMI.getLaneMaskForIndex(RR.Sub));
+ RegisterRef NR = normalize(RR);
+ auto F = Masks.find(NR.Reg);
+ if (F == Masks.end())
+ Masks.insert({NR.Reg, NR.Mask});
+ else
+ F->second |= NR.Mask;
+
+ // Visit all register units to see if there are any that were created
+ // by explicit aliases. Add those that were to the bit vector.
+ for (MCRegUnitIterator U(RR.Reg, &TRI); U.isValid(); ++U) {
+ MCRegUnitRootIterator R(*U, &TRI);
+ ++R;
+ if (!R.isValid())
+ continue;
+ ExpAliasUnits.set(*U);
+ CheckUnits = true;
+ }
return *this;
}
RegisterAggr &RegisterAggr::insert(const RegisterAggr &RG) {
for (std::pair<uint32_t,LaneBitmask> P : RG.Masks)
- setMaskRaw(P.first, P.second);
+ insert(RegisterRef(P.first, P.second));
return *this;
}
RegisterAggr &RegisterAggr::clear(RegisterRef RR) {
- uint32_t SuperR = getLargestSuperReg(RR.Reg);
- auto F = Masks.find(SuperR);
+ RegisterRef NR = normalize(RR);
+ auto F = Masks.find(NR.Reg);
if (F == Masks.end())
return *this;
- LaneBitmask M = LMI.getLaneMaskForIndex(RR.Sub);
- LaneBitmask NewM = F->second & ~composeMaskForReg(RR.Reg, M, SuperR);
+ LaneBitmask NewM = F->second & ~NR.Mask;
if (NewM == LaneBitmask(0))
Masks.erase(F);
else
DataFlowGraph::DataFlowGraph(MachineFunction &mf, const TargetInstrInfo &tii,
const TargetRegisterInfo &tri, const MachineDominatorTree &mdt,
const MachineDominanceFrontier &mdf, const TargetOperandInfo &toi)
- : TimeG("rdf"), MF(mf), TII(tii), TRI(tri), MDT(mdt), MDF(mdf), TOI(toi) {
+ : TimeG("rdf"), LMI(), MF(mf), TII(tii), TRI(tri), MDT(mdt), MDF(mdf),
+ TOI(toi) {
}
// Get the list of references aliased to RR. Lane masks are ignored.
RegisterSet DataFlowGraph::getAliasSet(uint32_t Reg) const {
- // Do not include RR in the alias set. For virtual registers return an
- // empty set.
+ // Do not include RR in the alias set.
RegisterSet AS;
- if (TargetRegisterInfo::isVirtualRegister(Reg))
- return AS;
assert(TargetRegisterInfo::isPhysicalRegister(Reg));
for (MCRegAliasIterator AI(Reg, &TRI, false); AI.isValid(); ++AI)
- AS.insert({*AI,0});
+ AS.insert(RegisterRef(*AI));
return AS;
}
: nullptr;
const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
if (uint32_t R = TLI.getExceptionPointerRegister(PF))
- LR.insert({R,0});
+ LR.insert(RegisterRef(R));
if (uint32_t R = TLI.getExceptionSelectorRegister(PF))
- LR.insert({R,0});
+ LR.insert(RegisterRef(R));
return LR;
}
NodeAddr<UseNode*> DataFlowGraph::newUse(NodeAddr<InstrNode*> Owner,
MachineOperand &Op, uint16_t Flags) {
NodeAddr<UseNode*> UA = newNode(NodeAttrs::Ref | NodeAttrs::Use | Flags);
- UA.Addr->setRegRef(&Op);
+ UA.Addr->setRegRef(&Op, *this);
return UA;
}
RegisterRef RR, NodeAddr<BlockNode*> PredB, uint16_t Flags) {
NodeAddr<PhiUseNode*> PUA = newNode(NodeAttrs::Ref | NodeAttrs::Use | Flags);
assert(Flags & NodeAttrs::PhiRef);
- PUA.Addr->setRegRef(RR);
+ PUA.Addr->setRegRef(RR, *this);
PUA.Addr->setPredecessor(PredB.Id);
return PUA;
}
NodeAddr<DefNode*> DataFlowGraph::newDef(NodeAddr<InstrNode*> Owner,
MachineOperand &Op, uint16_t Flags) {
NodeAddr<DefNode*> DA = newNode(NodeAttrs::Ref | NodeAttrs::Def | Flags);
- DA.Addr->setRegRef(&Op);
+ DA.Addr->setRegRef(&Op, *this);
return DA;
}
RegisterRef RR, uint16_t Flags) {
NodeAddr<DefNode*> DA = newNode(NodeAttrs::Ref | NodeAttrs::Def | Flags);
assert(Flags & NodeAttrs::PhiRef);
- DA.Addr->setRegRef(RR);
+ DA.Addr->setRegRef(RR, *this);
return DA;
}
MachineRegisterInfo &MRI = MF.getRegInfo();
for (auto I = MRI.livein_begin(), E = MRI.livein_end(); I != E; ++I) {
NodeAddr<PhiNode*> PA = newPhi(EA);
- RegisterRef RR = { I->first, 0 };
+ RegisterRef RR = RegisterRef(I->first);
uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving;
NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags);
PA.Addr->addMember(DA, *this);
removeUnusedPhis();
}
+RegisterRef DataFlowGraph::makeRegRef(unsigned Reg, unsigned Sub) const {
+ assert(TargetRegisterInfo::isPhysicalRegister(Reg));
+ assert(Sub == 0 && "Not expecting subregisters");
+ // TODO: Handle subregisters, if only to verify that the lane mask index (LMI) works with them.
+ return RegisterRef(Reg);
+}
+
// For each stack in the map DefM, push the delimiter for block B on it.
void DataFlowGraph::markBlock(NodeId B, DefStackMap &DefM) {
// Push block delimiters.
NodeList Rel = getRelatedRefs(IA, DA);
NodeAddr<DefNode*> PDA = Rel.front();
- RegisterRef RR = PDA.Addr->getRegRef();
+ RegisterRef RR = PDA.Addr->getRegRef(*this);
#ifndef NDEBUG
// Assert if the register is defined in two or more unrelated defs.
// This could happen if there are two or more def operands defining it.
// Return true if RA and RB overlap, false otherwise.
bool DataFlowGraph::alias(RegisterRef RA, RegisterRef RB) const {
- // Handling of physical registers.
- bool IsPhysA = TargetRegisterInfo::isPhysicalRegister(RA.Reg);
- bool IsPhysB = TargetRegisterInfo::isPhysicalRegister(RB.Reg);
- if (IsPhysA != IsPhysB)
- return false;
- if (IsPhysA) {
- LaneBitmask LA = LMI.getLaneMaskForIndex(RA.Sub);
- LaneBitmask LB = LMI.getLaneMaskForIndex(RB.Sub);
-
- MCRegUnitMaskIterator UMA(RA.Reg, &TRI);
- MCRegUnitMaskIterator UMB(RB.Reg, &TRI);
- // Reg units are returned in the numerical order.
- while (UMA.isValid() && UMB.isValid()) {
- std::pair<uint32_t,LaneBitmask> PA = *UMA;
- std::pair<uint32_t,LaneBitmask> PB = *UMB;
- // If the returned lane mask is 0, it should be treated as ~0
- // (or the lane mask from the given register ref should be ignored).
- // This can happen when a register has only one unit.
- if (PA.first < PB.first || (PA.second && !(PA.second & LA)))
- ++UMA;
- else if (PB.first < PA.first || (PB.second && !(PB.second & LB)))
- ++UMB;
- else
+ assert(TargetRegisterInfo::isPhysicalRegister(RA.Reg));
+ assert(TargetRegisterInfo::isPhysicalRegister(RB.Reg));
+
+ MCRegUnitMaskIterator UMA(RA.Reg, &TRI);
+ MCRegUnitMaskIterator UMB(RB.Reg, &TRI);
+ // Register units are returned in numerical order.
+ while (UMA.isValid() && UMB.isValid()) {
+ std::pair<uint32_t,LaneBitmask> PA = *UMA;
+ std::pair<uint32_t,LaneBitmask> PB = *UMB;
+ // If the returned lane mask is 0, it should be treated as ~0
+ // (or the lane mask from the given register ref should be ignored).
+ // This can happen when a register has only one unit.
+ if (PA.first == PB.first) {
+ if (!PA.second || !PB.second || (PA.second & PB.second))
return true;
+ ++UMA;
+ ++UMB;
+ continue;
}
- return false;
+ if (PA.first < PB.first)
+ ++UMA;
+ else if (PB.first < PA.first)
+ ++UMB;
}
-
- // Handling of virtual registers.
- bool IsVirtA = TargetRegisterInfo::isVirtualRegister(RA.Reg);
- bool IsVirtB = TargetRegisterInfo::isVirtualRegister(RB.Reg);
- if (IsVirtA != IsVirtB)
- return false;
- if (IsVirtA) {
- if (RA.Reg != RB.Reg)
- return false;
- // RA and RB refer to the same register. If any of them refer to the
- // whole register, they must be aliased.
- if (RA.Sub == 0 || RB.Sub == 0)
- return true;
- unsigned SA = TRI.getSubRegIdxSize(RA.Sub);
- unsigned OA = TRI.getSubRegIdxOffset(RA.Sub);
- unsigned SB = TRI.getSubRegIdxSize(RB.Sub);
- unsigned OB = TRI.getSubRegIdxOffset(RB.Sub);
- if (OA <= OB && OA+SA > OB)
- return true;
- if (OB <= OA && OB+SB > OA)
- return true;
- return false;
- }
-
return false;
}
NodeAddr<RefNode*> RA) const {
assert(IA.Id != 0 && RA.Id != 0);
- auto Related = [RA](NodeAddr<RefNode*> TA) -> bool {
+ auto Related = [this,RA](NodeAddr<RefNode*> TA) -> bool {
if (TA.Addr->getKind() != RA.Addr->getKind())
return false;
- if (TA.Addr->getRegRef() != RA.Addr->getRegRef())
+ if (TA.Addr->getRegRef(*this) != RA.Addr->getRegRef(*this))
return false;
return true;
};
return TUA.Addr->getPredecessor() == RUA.Addr->getPredecessor();
};
- RegisterRef RR = RA.Addr->getRegRef();
+ RegisterRef RR = RA.Addr->getRegRef(*this);
if (IA.Addr->getKind() == NodeAttrs::Stmt)
return RA.Addr->getNextRef(RR, RelatedStmt, true, *this);
return RA.Addr->getNextRef(RR, RelatedPhi, true, *this);
for (const MachineOperand &UseOp : In.operands()) {
if (!UseOp.isReg() || !UseOp.isUse() || UseOp.isUndef())
continue;
- RegisterRef UR = { UseOp.getReg(), UseOp.getSubReg() };
+ RegisterRef UR = makeRegRef(UseOp.getReg(), UseOp.getSubReg());
if (alias(DR, UR))
return false;
}
RegisterSet ImpUses, ImpDefs;
if (const uint16_t *ImpD = In.getDesc().getImplicitDefs())
while (uint16_t R = *ImpD++)
- ImpDefs.insert({R, 0});
+ ImpDefs.insert(RegisterRef(R));
if (const uint16_t *ImpU = In.getDesc().getImplicitUses())
while (uint16_t R = *ImpU++)
- ImpUses.insert({R, 0});
+ ImpUses.insert(RegisterRef(R));
bool IsCall = isCall(In);
bool NeedsImplicit = IsCall || In.isInlineAsm() || In.isReturn();
MachineOperand &Op = In.getOperand(OpN);
if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
continue;
- RegisterRef RR = { Op.getReg(), Op.getSubReg() };
+ RegisterRef RR = makeRegRef(Op.getReg(), Op.getSubReg());
uint16_t Flags = NodeAttrs::None;
if (TOI.isPreserving(In, OpN)) {
Flags |= NodeAttrs::Preserving;
MachineOperand &Op = In.getOperand(OpN);
if (!Op.isReg() || !Op.isDef() || !Op.isImplicit())
continue;
- RegisterRef RR = { Op.getReg(), Op.getSubReg() };
+ RegisterRef RR = makeRegRef(Op.getReg(), Op.getSubReg());
if (!NeedsImplicit && !ImpDefs.count(RR))
continue;
if (DoneDefs.count(RR))
MachineOperand &Op = In.getOperand(OpN);
if (!Op.isReg() || !Op.isUse())
continue;
- RegisterRef RR = { Op.getReg(), Op.getSubReg() };
+ RegisterRef RR = makeRegRef(Op.getReg(), Op.getSubReg());
// Add implicit uses on return and call instructions, and on predicated
// instructions regardless of whether or not they appear in the instruction
// descriptor's list.
for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this))
for (NodeAddr<RefNode*> RA : IA.Addr->members(*this))
- Refs.insert(RA.Addr->getRegRef());
+ Refs.insert(RA.Addr->getRegRef(*this));
}
// Scan all defs in the block node BA and record in PhiM the locations of
RegisterSet Defs;
for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this))
for (NodeAddr<RefNode*> RA : IA.Addr->members_if(IsDef, *this))
- Defs.insert(RA.Addr->getRegRef());
+ Defs.insert(RA.Addr->getRegRef(*this));
// Calculate the iterated dominance frontier of BB.
const MachineDominanceFrontier::DomSetType &DF = DFLoc->second;
auto MaxCoverIn = [this] (RegisterRef RR, RegisterSet &RRs) -> RegisterRef {
for (RegisterRef I : RRs)
- if (I != RR && RegisterAggr::isCoverOf(I, RR, LMI, TRI))
+ if (I != RR && RegisterAggr::isCoverOf(I, RR, TRI))
RR = I;
return RR;
};
DefStack &DS) {
if (DS.empty())
return;
- RegisterRef RR = TA.Addr->getRegRef();
+ RegisterRef RR = TA.Addr->getRegRef(*this);
NodeAddr<T> TAP;
// References from the def stack that have been examined so far.
- RegisterAggr Defs(LMI, TRI);
+ RegisterAggr Defs(TRI);
for (auto I = DS.top(), E = DS.bottom(); I != E; I.down()) {
- RegisterRef QR = I->Addr->getRegRef();
+ RegisterRef QR = I->Addr->getRegRef(*this);
// Skip all defs that are aliased to any of the defs that we have already
// seen. If this completes a cover of RR, stop the stack traversal.
for (NodeAddr<RefNode*> RA : SA.Addr->members(*this)) {
uint16_t Kind = RA.Addr->getKind();
assert(Kind == NodeAttrs::Def || Kind == NodeAttrs::Use);
- RegisterRef RR = RA.Addr->getRegRef();
+ RegisterRef RR = RA.Addr->getRegRef(*this);
#ifndef NDEBUG
// Do not expect multiple defs of the same reference.
assert(Kind != NodeAttrs::Def || !Defs.count(RR));
// Find what register this phi is for.
NodeAddr<RefNode*> RA = IA.Addr->getFirstMember(*this);
assert(RA.Id != 0);
- if (EHLiveIns.count(RA.Addr->getRegRef()))
+ if (EHLiveIns.count(RA.Addr->getRegRef(*this)))
continue;
}
// Go over each phi use associated with MBB, and link it.
for (auto U : IA.Addr->members_if(IsUseForBA, *this)) {
NodeAddr<PhiUseNode*> PUA = U;
- RegisterRef RR = PUA.Addr->getRegRef();
+ RegisterRef RR = PUA.Addr->getRegRef(*this);
linkRefUp<UseNode*>(IA, PUA, DefM[RR.Reg]);
}
}
namespace rdf {
typedef uint32_t NodeId;
+ typedef uint32_t RegisterId;
struct DataFlowGraph;
};
struct RegisterRef {
- // For virtual registers, Reg and Sub have the usual meanings.
- //
- // Physical registers are assumed not to have any subregisters, and for
- // them, Sub is the key of the LaneBitmask in the lane mask map in DFG.
- // The case of Sub = 0 is treated as 'all lanes', i.e. lane mask of ~0.
- // Use an key/map to access lane masks, since we only have uint32_t
- // for it, and the LaneBitmask type can grow in the future.
- //
- // The case when Reg = 0 and Sub = 0 is reserved to mean "no register".
- uint32_t Reg, Sub;
-
- // No non-trivial constructors, since this will be a member of a union.
- RegisterRef() = default;
+ RegisterId Reg;
+ LaneBitmask Mask;
+
+ RegisterRef() : RegisterRef(0) {}
+ explicit RegisterRef(RegisterId R, LaneBitmask M = ~LaneBitmask(0))
+ : Reg(R), Mask(M) {}
RegisterRef(const RegisterRef &RR) = default;
RegisterRef &operator= (const RegisterRef &RR) = default;
bool operator== (const RegisterRef &RR) const {
- return Reg == RR.Reg && Sub == RR.Sub;
+ return Reg == RR.Reg && Mask == RR.Mask;
}
bool operator!= (const RegisterRef &RR) const {
return !operator==(RR);
}
bool operator< (const RegisterRef &RR) const {
- return Reg < RR.Reg || (Reg == RR.Reg && Sub < RR.Sub);
+ return Reg < RR.Reg || (Reg == RR.Reg && Mask < RR.Mask);
}
};
typedef std::set<RegisterRef> RegisterSet;
const TargetInstrInfo &TII;
};
+
+ // Packed register reference. Only used for storage.
+ struct PackedRegisterRef {
+ RegisterId Reg;
+ uint32_t MaskId;
+ };
+
// Template class for a map translating uint32_t into arbitrary types.
// The map will act like an indexed set: upon insertion of a new object,
// it will automatically assign a new index to it. Index of 0 is treated
template <typename T, unsigned N = 32>
struct IndexedSet {
IndexedSet() : Map() { Map.reserve(N); }
- const T get(uint32_t Idx) const {
+ T get(uint32_t Idx) const {
// Index Idx corresponds to Map[Idx-1].
assert(Idx != 0 && !Map.empty() && Idx-1 < Map.size());
return Map[Idx-1];
}
uint32_t insert(T Val) {
// Linear search.
- auto F = find(Map, Val);
+ auto F = llvm::find(Map, Val);
if (F != Map.end())
- return F - Map.begin();
+ return F - Map.begin() + 1;
Map.push_back(Val);
return Map.size(); // Return actual_index + 1.
}
-
+ uint32_t find(T Val) const {
+ auto F = llvm::find(Map, Val);
+ assert(F != Map.end());
+ return F - Map.begin() + 1; // Return the 1-based index, matching insert() and get().
+ }
private:
std::vector<T> Map;
};
struct LaneMaskIndex : private IndexedSet<LaneBitmask> {
+ LaneMaskIndex() = default;
+
LaneBitmask getLaneMaskForIndex(uint32_t K) const {
return K == 0 ? ~LaneBitmask(0) : get(K);
}
assert(LM != LaneBitmask(0));
return LM == ~LaneBitmask(0) ? 0 : insert(LM);
}
+ uint32_t getIndexForLaneMask(LaneBitmask LM) const {
+ assert(LM != LaneBitmask(0));
+ return LM == ~LaneBitmask(0) ? 0 : find(LM);
+ }
+ PackedRegisterRef pack(RegisterRef RR) {
+ return { RR.Reg, getIndexForLaneMask(RR.Mask) };
+ }
+ PackedRegisterRef pack(RegisterRef RR) const {
+ return { RR.Reg, getIndexForLaneMask(RR.Mask) };
+ }
+ RegisterRef unpack(PackedRegisterRef PR) const {
+ return RegisterRef(PR.Reg, getLaneMaskForIndex(PR.MaskId));
+ }
};
struct RegisterAggr {
- typedef std::pair<uint32_t,LaneBitmask> ValueType;
-
- RegisterAggr(const LaneMaskIndex &m, const TargetRegisterInfo &tri)
+ RegisterAggr(const TargetRegisterInfo &tri)
: Masks(), ExpAliasUnits(tri.getNumRegUnits()), CheckUnits(false),
- LMI(m), TRI(tri) {}
+ TRI(tri) {}
RegisterAggr(const RegisterAggr &RG)
: Masks(RG.Masks), ExpAliasUnits(RG.ExpAliasUnits),
- CheckUnits(RG.CheckUnits), LMI(RG.LMI), TRI(RG.TRI) {}
+ CheckUnits(RG.CheckUnits), TRI(RG.TRI) {}
bool empty() const { return Masks.empty(); }
bool hasAliasOf(RegisterRef RR) const;
bool hasCoverOf(RegisterRef RR) const;
- static bool isCoverOf(RegisterRef RefA, RegisterRef RefB,
- const LaneMaskIndex &LMI, const TargetRegisterInfo &TRI) {
- return RegisterAggr(LMI, TRI).insert(RefA).hasCoverOf(RefB);
+ static bool isCoverOf(RegisterRef RA, RegisterRef RB,
+ const TargetRegisterInfo &TRI) {
+ return RegisterAggr(TRI).insert(RA).hasCoverOf(RB);
}
RegisterAggr &insert(RegisterRef RR);
void print(raw_ostream &OS) const;
private:
- typedef std::unordered_map<ValueType::first_type,
- ValueType::second_type> MapType;
+ typedef std::unordered_map<RegisterId, LaneBitmask> MapType;
+
+ public:
+ typedef MapType::const_iterator iterator;
+ iterator begin() const { return Masks.begin(); }
+ iterator end() const { return Masks.end(); }
+ RegisterRef normalize(RegisterRef RR) const;
+
+ private:
MapType Masks;
BitVector ExpAliasUnits; // Register units for explicit aliases.
bool CheckUnits;
- const LaneMaskIndex &LMI;
const TargetRegisterInfo &TRI;
-
- uint32_t getLargestSuperReg(uint32_t Reg) const;
- void setMaskRaw(uint32_t Reg, LaneBitmask LM);
- LaneBitmask composeMaskForReg(uint32_t Reg, LaneBitmask LM,
- uint32_t SuperR) const;
};
};
union {
MachineOperand *Op; // Non-phi refs point to a machine operand.
- RegisterRef RR; // Phi refs store register info directly.
+ PackedRegisterRef PR; // Phi refs store register info directly.
};
};
struct RefNode : public NodeBase {
RefNode() = default;
- RegisterRef getRegRef() const;
+ RegisterRef getRegRef(const DataFlowGraph &G) const;
MachineOperand &getOp() {
assert(!(getFlags() & NodeAttrs::PhiRef));
return *Ref.Op;
}
- void setRegRef(RegisterRef RR);
- void setRegRef(MachineOperand *Op);
+ void setRegRef(RegisterRef RR, DataFlowGraph &G);
+ void setRegRef(MachineOperand *Op, DataFlowGraph &G);
NodeId getReachingDef() const {
return Ref.RD;
}
NodeAddr<FuncNode*> getFunc() const { return Func; }
MachineFunction &getMF() const { return MF; }
- LaneMaskIndex &getLMI() { return LMI; }
- const LaneMaskIndex &getLMI() const { return LMI; }
const TargetInstrInfo &getTII() const { return TII; }
const TargetRegisterInfo &getTRI() const { return TRI; }
const MachineDominatorTree &getDT() const { return MDT; }
void markBlock(NodeId B, DefStackMap &DefM);
void releaseBlock(NodeId B, DefStackMap &DefM);
+ PackedRegisterRef pack(RegisterRef RR) { return LMI.pack(RR); }
+ PackedRegisterRef pack(RegisterRef RR) const { return LMI.pack(RR); }
+ RegisterRef unpack(PackedRegisterRef PR) const { return LMI.unpack(PR); }
+ RegisterRef makeRegRef(unsigned Reg, unsigned Sub) const;
+
NodeAddr<RefNode*> getNextRelated(NodeAddr<InstrNode*> IA,
NodeAddr<RefNode*> RA) const;
NodeAddr<RefNode*> getNextImp(NodeAddr<InstrNode*> IA,
while (NA.Addr != this) {
if (NA.Addr->getType() == NodeAttrs::Ref) {
NodeAddr<RefNode*> RA = NA;
- if (RA.Addr->getRegRef() == RR && P(NA))
+ if (RA.Addr->getRegRef(G) == RR && P(NA))
return NA;
if (NextOnly)
break;
if (TA.Addr->getFlags() & NodeAttrs::PhiRef)
continue;
// Stop at the covering/overwriting def of the initial register reference.
- RegisterRef RR = TA.Addr->getRegRef();
+ RegisterRef RR = TA.Addr->getRegRef(DFG);
if (!DFG.IsPreservingDef(TA))
- if (RegisterAggr::isCoverOf(RR, RefRR, DFG.getLMI(), TRI))
+ if (RegisterAggr::isCoverOf(RR, RefRR, TRI))
continue;
// Get the next level of reaching defs. This will include multiple
// reaching defs for shadows.
for (NodeId N : DefQ) {
auto TA = DFG.addr<DefNode*>(N);
bool IsPhi = TA.Addr->getFlags() & NodeAttrs::PhiRef;
- if (!IsPhi && !DFG.alias(RefRR, TA.Addr->getRegRef()))
+ if (!IsPhi && !DFG.alias(RefRR, TA.Addr->getRegRef(DFG)))
continue;
Defs.insert(TA.Id);
Owners.insert(TA.Addr->getOwner(DFG).Id);
bool IsPhi = DFG.IsCode<NodeAttrs::Phi>(TA);
NodeList Ds;
for (NodeAddr<DefNode*> DA : TA.Addr->members_if(DefInSet, DFG)) {
- RegisterRef QR = DA.Addr->getRegRef();
+ RegisterRef QR = DA.Addr->getRegRef(DFG);
// Add phi defs even if they are covered by subsequent defs. This is
// for cases where the reached use is not covered by any of the defs
// encountered so far: the phi def is needed to expose the liveness
uint16_t Flags = DA.Addr->getFlags();
if (!FullChain || !(Flags & NodeAttrs::PhiRef))
if (!(Flags & NodeAttrs::Preserving)) // Don't care about Undef here.
- RRs.insert(DA.Addr->getRegRef());
+ RRs.insert(DA.Addr->getRegRef(DFG));
}
}
NodeAddr<RefNode*> RefA, NodeSet &Visited, const NodeSet &Defs) {
// Collect all defined registers. Do not consider phis to be defining
// anything, only collect "real" definitions.
- RegisterAggr DefRRs(DFG.getLMI(), TRI);
+ RegisterAggr DefRRs(TRI);
for (NodeId D : Defs) {
const auto DA = DFG.addr<const DefNode*>(D);
if (!(DA.Addr->getFlags() & NodeAttrs::PhiRef))
- DefRRs.insert(DA.Addr->getRegRef());
+ DefRRs.insert(DA.Addr->getRegRef(DFG));
}
NodeList RDs = getAllReachingDefs(RefRR, RefA, true, DefRRs);
while (U != 0) {
auto UA = DFG.addr<UseNode*>(U);
if (!(UA.Addr->getFlags() & NodeAttrs::Undef)) {
- RegisterRef UR = UA.Addr->getRegRef();
+ RegisterRef UR = UA.Addr->getRegRef(DFG);
if (DFG.alias(RefRR, UR) && !DefRRs.hasCoverOf(UR))
Uses.insert(U);
}
for (NodeId D = DefA.Addr->getReachedDef(), NextD; D != 0; D = NextD) {
auto DA = DFG.addr<DefNode*>(D);
NextD = DA.Addr->getSibling();
- RegisterRef DR = DA.Addr->getRegRef();
+ RegisterRef DR = DA.Addr->getRegRef(DFG);
// If this def is already covered, it cannot reach anything new.
// Similarly, skip it if it is not aliased to the interesting register.
if (DefRRs.hasCoverOf(DR) || !DFG.alias(RefRR, DR))
for (NodeAddr<UseNode*> VA : DFG.getRelatedRefs(PhiA, UA)) {
SeenUses.insert(VA.Id);
- RegisterAggr DefRRs(DFG.getLMI(), TRI);
+ RegisterAggr DefRRs(TRI);
for (NodeAddr<DefNode*> DA : getAllReachingDefs(VA)) {
if (DA.Addr->getFlags() & NodeAttrs::PhiRef) {
NodeId RP = DA.Addr->getOwner(DFG).Id;
else
F->second.insert(DefRRs);
}
- DefRRs.insert(DA.Addr->getRegRef());
+ DefRRs.insert(DA.Addr->getRegRef(DFG));
}
}
}
NodeAddr<PhiNode*> PA = DFG.addr<PhiNode*>(I.first);
NodeList Ds = PA.Addr->members_if(DFG.IsRef<NodeAttrs::Def>, DFG);
if (!Ds.empty()) {
- RegisterRef RR = NodeAddr<DefNode*>(Ds[0]).Addr->getRegRef();
+ RegisterRef RR = NodeAddr<DefNode*>(Ds[0]).Addr->getRegRef(DFG);
dbgs() << '<' << Print<RegisterRef>(RR, DFG) << '>';
} else {
dbgs() << "<noreg>";
// Add function live-ins to the live-in set of the function entry block.
auto &EntryIn = LiveMap[&MF.front()];
for (auto I = MRI.livein_begin(), E = MRI.livein_end(); I != E; ++I)
- EntryIn.insert({I->first,0});
+ EntryIn.insert(RegisterRef(I->first));
if (Trace) {
// Dump the liveness map
LV.set(I->PhysReg);
dbgs() << "BB#" << B.getNumber() << "\t rec = {";
for (int x = LV.find_first(); x >= 0; x = LV.find_next(x))
- dbgs() << ' ' << Print<RegisterRef>({unsigned(x),0}, DFG);
+ dbgs() << ' ' << Print<RegisterRef>(RegisterRef(x), DFG);
dbgs() << " }\n";
dbgs() << "\tcomp = " << Print<RegisterSet>(LiveMap[&B], DFG) << '\n';
}
// Add the newly computed live-ins.
auto &LiveIns = LiveMap[&B];
for (auto I : LiveIns) {
- assert(I.Sub == 0);
- B.addLiveIn(I.Reg);
+ B.addLiveIn({MCPhysReg(I.Reg), I.Mask});
}
}
}
assert(RD);
RA = DFG.addr<DefNode*>(RD);
}
- return RA.Addr->getRegRef();
+ return RA.Addr->getRegRef(DFG);
}
// propagated upwards. This only applies to non-preserving defs,
// and to the parts of the register actually covered by those defs.
// (Note that phi defs should always be preserving.)
- RegisterAggr RRs(DFG.getLMI(), TRI);
+ RegisterAggr RRs(TRI);
if (!DFG.IsPreservingDef(DA)) {
assert(!(IA.Addr->getFlags() & NodeAttrs::Phi));
// that is also located in this block. LRef is a register ref
// whose use this def reaches. If DA covers LRef, then no part
// of LRef is exposed upwards.
- if (RRs.insert(DA.Addr->getRegRef()).hasCoverOf(LRef))
+ if (RRs.insert(DA.Addr->getRegRef(DFG)).hasCoverOf(LRef))
continue;
}
// TA is in B. Only add this def to the accumulated cover if it is
// not preserving.
if (!(TA.Addr->getFlags() & NodeAttrs::Preserving))
- RRs.insert(TA.Addr->getRegRef());
+ RRs.insert(TA.Addr->getRegRef(DFG));
// If this is enough to cover LRef, then stop.
if (RRs.hasCoverOf(LRef))
break;
if (IA.Addr->getKind() != NodeAttrs::Stmt)
continue;
for (NodeAddr<UseNode*> UA : IA.Addr->members_if(DFG.IsUse, DFG)) {
- RegisterRef RR = UA.Addr->getRegRef();
+ RegisterRef RR = UA.Addr->getRegRef(DFG);
if (UA.Addr->getFlags() & NodeAttrs::Undef)
continue;
for (NodeAddr<DefNode*> D : getAllReachingDefs(UA))
Liveness(MachineRegisterInfo &mri, const DataFlowGraph &g)
: DFG(g), TRI(g.getTRI()), MDT(g.getDT()), MDF(g.getDF()),
- MRI(mri), Empty(), NoRegs(g.getLMI(), g.getTRI()),
- Trace(false) {}
+ MRI(mri), Empty(), NoRegs(g.getTRI()), Trace(false) {}
NodeList getAllReachingDefs(RegisterRef RefRR, NodeAddr<RefNode*> RefA,
bool FullChain, const RegisterAggr &DefRRs);
NodeList getAllReachingDefs(NodeAddr<RefNode*> RefA) {
- return getAllReachingDefs(RefA.Addr->getRegRef(), RefA, false, NoRegs);
+ return getAllReachingDefs(RefA.Addr->getRegRef(DFG), RefA, false, NoRegs);
}
NodeList getAllReachingDefs(RegisterRef RefRR, NodeAddr<RefNode*> RefA) {
return getAllReachingDefs(RefRR, RefA, false, NoRegs);