/// scheduling model.
virtual void postProcessInstruction(std::unique_ptr<Instruction> &Inst,
const MCInst &MCI) {}
+
+ // The resetState() method is invoked at the beginning of each code region
+ // so that targets which override it can clear any state left over from the
+ // previous code region.
+ virtual void resetState() {}
};
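As a rough sketch of the new hook (the class name and the SeenMemOps member are hypothetical, not part of this patch), a target-specific InstrPostProcess can now drop any per-region bookkeeping when the driver starts a new code region:

class MyTargetInstrPostProcess : public InstrPostProcess {
  // Hypothetical per-region state accumulated by postProcessInstruction().
  unsigned SeenMemOps = 0;

public:
  using InstrPostProcess::InstrPostProcess;

  void postProcessInstruction(std::unique_ptr<Instruction> &Inst,
                              const MCInst &MCI) override;

  // Invoked at the start of every code region; discard leftover state.
  void resetState() override { SeenMemOps = 0; }
};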
/// Class which can be overridden by targets to enforce instruction
// subtarget when computing the reciprocal throughput.
unsigned SchedClassID;
- unsigned MayLoad : 1;
- unsigned MayStore : 1;
- unsigned HasSideEffects : 1;
- unsigned BeginGroup : 1;
- unsigned EndGroup : 1;
- unsigned RetireOOO : 1;
-
// True if all buffered resources are in-order, and there is at least one
// buffer which is a dispatch hazard (BufferSize = 0).
unsigned MustIssueImmediately : 1;
unsigned Opcode;
// Flags used by the LSUnit.
- bool IsALoadBarrier;
- bool IsAStoreBarrier;
+ bool IsALoadBarrier : 1;
+ bool IsAStoreBarrier : 1;
+ // Flags copied from the InstrDesc and potentially modified by
+ // CustomBehaviour or (more likely) InstrPostProcess.
+ bool MayLoad : 1;
+ bool MayStore : 1;
+ bool HasSideEffects : 1;
+ bool BeginGroup : 1;
+ bool EndGroup : 1;
+ bool RetireOOO : 1;
public:
InstructionBase(const InstrDesc &D, const unsigned Opcode)
// Returns true if this instruction is a candidate for move elimination.
bool isOptimizableMove() const { return IsOptimizableMove; }
void setOptimizableMove() { IsOptimizableMove = true; }
- bool isMemOp() const { return Desc.MayLoad || Desc.MayStore; }
+ bool isMemOp() const { return MayLoad || MayStore; }
+
+ // Getters and setters for general instruction flags.
+ void setMayLoad(bool newVal) { MayLoad = newVal; }
+ void setMayStore(bool newVal) { MayStore = newVal; }
+ void setHasSideEffects(bool newVal) { HasSideEffects = newVal; }
+ void setBeginGroup(bool newVal) { BeginGroup = newVal; }
+ void setEndGroup(bool newVal) { EndGroup = newVal; }
+ void setRetireOOO(bool newVal) { RetireOOO = newVal; }
+
+ bool getMayLoad() const { return MayLoad; }
+ bool getMayStore() const { return MayStore; }
+ bool getHasSideEffects() const { return HasSideEffects; }
+ bool getBeginGroup() const { return BeginGroup; }
+ bool getEndGroup() const { return EndGroup; }
+ bool getRetireOOO() const { return RetireOOO; }
};
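Continuing the hypothetical MyTargetInstrPostProcess sketch from above, a postProcessInstruction() override can now read and adjust these flags on the lowered Instruction instead of mutating the shared InstrDesc (isSoftwarePrefetch() is an assumed target helper, not an existing API):

void MyTargetInstrPostProcess::postProcessInstruction(
    std::unique_ptr<Instruction> &Inst, const MCInst &MCI) {
  if (Inst->getMayLoad() || Inst->getMayStore())
    ++SeenMemOps;

  // Assumed target-specific tweak: model software prefetches as not
  // occupying a load-queue entry in the simulated LSUnit.
  if (isSoftwarePrefetch(MCI.getOpcode()))
    Inst->setMayLoad(false);
}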
/// An instruction propagated through the simulated instruction pipeline.
#endif
unsigned LSUnit::dispatch(const InstRef &IR) {
- const InstrDesc &Desc = IR.getInstruction()->getDesc();
- bool IsStoreBarrier = IR.getInstruction()->isAStoreBarrier();
- bool IsLoadBarrier = IR.getInstruction()->isALoadBarrier();
- assert((Desc.MayLoad || Desc.MayStore) && "Not a memory operation!");
+ const Instruction &IS = *IR.getInstruction();
+ bool IsStoreBarrier = IS.isAStoreBarrier();
+ bool IsLoadBarrier = IS.isALoadBarrier();
+ assert((IS.getMayLoad() || IS.getMayStore()) && "Not a memory operation!");
- if (Desc.MayLoad)
+ if (IS.getMayLoad())
acquireLQSlot();
- if (Desc.MayStore)
+ if (IS.getMayStore())
acquireSQSlot();
- if (Desc.MayStore) {
+ if (IS.getMayStore()) {
unsigned NewGID = createMemoryGroup();
MemoryGroup &NewGroup = getGroup(NewGID);
NewGroup.addInstruction();
if (IsStoreBarrier)
CurrentStoreBarrierGroupID = NewGID;
- if (Desc.MayLoad) {
+ if (IS.getMayLoad()) {
CurrentLoadGroupID = NewGID;
if (IsLoadBarrier)
CurrentLoadBarrierGroupID = NewGID;
return NewGID;
}
- assert(Desc.MayLoad && "Expected a load!");
+ assert(IS.getMayLoad() && "Expected a load!");
unsigned ImmediateLoadDominator =
std::max(CurrentLoadGroupID, CurrentLoadBarrierGroupID);
}
LSUnit::Status LSUnit::isAvailable(const InstRef &IR) const {
- const InstrDesc &Desc = IR.getInstruction()->getDesc();
- if (Desc.MayLoad && isLQFull())
+ const Instruction &IS = *IR.getInstruction();
+ if (IS.getMayLoad() && isLQFull())
return LSUnit::LSU_LQUEUE_FULL;
- if (Desc.MayStore && isSQFull())
+ if (IS.getMayStore() && isSQFull())
return LSUnit::LSU_SQUEUE_FULL;
return LSUnit::LSU_AVAILABLE;
}
}
void LSUnitBase::onInstructionRetired(const InstRef &IR) {
- const InstrDesc &Desc = IR.getInstruction()->getDesc();
- bool IsALoad = Desc.MayLoad;
- bool IsAStore = Desc.MayStore;
+ const Instruction &IS = *IR.getInstruction();
+ bool IsALoad = IS.getMayLoad();
+ bool IsAStore = IS.getMayStore();
assert((IsALoad || IsAStore) && "Expected a memory operation!");
if (IsALoad) {
LLVM_DEBUG(dbgs() << "\n\t\tOpcode Name= " << MCII.getName(Opcode) << '\n');
LLVM_DEBUG(dbgs() << "\t\tSchedClassID=" << SchedClassID << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tOpcode=" << Opcode << '\n');
// Create a new empty descriptor.
std::unique_ptr<InstrDesc> ID = std::make_unique<InstrDesc>();
FirstReturnInst = false;
}
- ID->MayLoad = MCDesc.mayLoad();
- ID->MayStore = MCDesc.mayStore();
- ID->HasSideEffects = MCDesc.hasUnmodeledSideEffects();
- ID->BeginGroup = SCDesc.BeginGroup;
- ID->EndGroup = SCDesc.EndGroup;
- ID->RetireOOO = SCDesc.RetireOOO;
-
initializeUsedResources(*ID, SCDesc, STI, ProcResourceMasks);
computeMaxLatency(*ID, MCDesc, SCDesc, STI);
std::unique_ptr<Instruction> NewIS =
std::make_unique<Instruction>(D, MCI.getOpcode());
+ const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
+ const MCSchedClassDesc &SCDesc =
+ *STI.getSchedModel().getSchedClassDesc(D.SchedClassID);
+
+ NewIS->setMayLoad(MCDesc.mayLoad());
+ NewIS->setMayStore(MCDesc.mayStore());
+ NewIS->setHasSideEffects(MCDesc.hasUnmodeledSideEffects());
+ NewIS->setBeginGroup(SCDesc.BeginGroup);
+ NewIS->setEndGroup(SCDesc.EndGroup);
+ NewIS->setRetireOOO(SCDesc.RetireOOO);
+
// Check if this is a dependency breaking instruction.
APInt Mask;
Error DispatchStage::dispatch(InstRef IR) {
assert(!CarryOver && "Cannot dispatch another instruction!");
Instruction &IS = *IR.getInstruction();
- const InstrDesc &Desc = IS.getDesc();
const unsigned NumMicroOps = IS.getNumMicroOps();
if (NumMicroOps > DispatchWidth) {
assert(AvailableEntries == DispatchWidth);
}
// Check if this instruction ends the dispatch group.
- if (Desc.EndGroup)
+ if (IS.getEndGroup())
AvailableEntries = 0;
// Check if this is an optimizable reg-reg move or an XCHG-like instruction.
if (Required > AvailableEntries)
return false;
- if (Desc.BeginGroup && AvailableEntries != DispatchWidth)
+ if (Inst.getBeginGroup() && AvailableEntries != DispatchWidth)
return false;
// The dispatch logic doesn't internally buffer instructions. It only accepts
// Ensure that instructions eliminated at register renaming stage are in a
// consistent state.
- const InstrDesc &Desc = Inst.getDesc();
- assert(!Desc.MayLoad && !Desc.MayStore && "Cannot eliminate a memory op!");
+ assert(!Inst.getMayLoad() && !Inst.getMayStore() &&
+ "Cannot eliminate a memory op!");
}
#endif
const Instruction &Inst = *IR.getInstruction();
unsigned NumMicroOps = Inst.getNumMicroOps();
- const InstrDesc &Desc = Inst.getDesc();
bool ShouldCarryOver = NumMicroOps > getIssueWidth();
if (Bandwidth < NumMicroOps && !ShouldCarryOver)
// An instruction with BeginGroup must be the first instruction issued in a
// cycle.
- if (Desc.BeginGroup && NumIssued != 0)
+ if (Inst.getBeginGroup() && NumIssued != 0)
return false;
return true;
}
if (LastWriteBackCycle) {
- if (!IR.getInstruction()->getDesc().RetireOOO) {
+ if (!IR.getInstruction()->getRetireOOO()) {
unsigned NextWriteBackCycle = findFirstWriteBackCycle(IR);
// Delay the instruction to ensure that writes happen in program order.
if (NextWriteBackCycle < LastWriteBackCycle) {
LLVM_DEBUG(dbgs() << "[N] Carry over #" << IR << " \n");
} else {
NumIssued += NumMicroOps;
- Bandwidth = Desc.EndGroup ? 0 : Bandwidth - NumMicroOps;
+ Bandwidth = IS.getEndGroup() ? 0 : Bandwidth - NumMicroOps;
}
// If the instruction has a latency of 0, we need to handle
IssuedInst.push_back(IR);
- if (!IR.getInstruction()->getDesc().RetireOOO)
+ if (!IR.getInstruction()->getRetireOOO())
LastWriteBackCycle = IS.getCyclesLeft();
return llvm::ErrorSuccess();
LLVM_DEBUG(dbgs() << "[N] Carry over (complete) #" << CarriedOver << " \n");
- if (CarriedOver.getInstruction()->getDesc().EndGroup)
+ if (CarriedOver.getInstruction()->getEndGroup())
Bandwidth = 0;
else
Bandwidth -= CarryOver;
} else if (Event.Type == HWInstructionEvent::Dispatched) {
const Instruction &Inst = *Event.IR.getInstruction();
const unsigned Index = Event.IR.getSourceIndex();
- if (LQResourceID && Inst.getDesc().MayLoad &&
+ if (LQResourceID && Inst.getMayLoad() &&
MostRecentLoadDispatched != Index) {
Usage[LQResourceID].SlotsInUse++;
MostRecentLoadDispatched = Index;
}
- if (SQResourceID && Inst.getDesc().MayStore &&
+ if (SQResourceID && Inst.getMayStore() &&
MostRecentStoreDispatched != Index) {
Usage[SQResourceID].SlotsInUse++;
MostRecentStoreDispatched = Index;
}
} else if (Event.Type == HWInstructionEvent::Executed) {
const Instruction &Inst = *Event.IR.getInstruction();
- if (LQResourceID && Inst.getDesc().MayLoad) {
+ if (LQResourceID && Inst.getMayLoad()) {
assert(Usage[LQResourceID].SlotsInUse);
Usage[LQResourceID].SlotsInUse--;
}
- if (SQResourceID && Inst.getDesc().MayStore) {
+ if (SQResourceID && Inst.getMayStore()) {
assert(Usage[SQResourceID].SlotsInUse);
Usage[SQResourceID].SlotsInUse--;
}
const MCSchedModel &SM = STI->getSchedModel();
+ std::unique_ptr<mca::InstrPostProcess> IPP;
+ if (!DisableCustomBehaviour) {
+ // TODO: It may be a good idea to separate CB and IPP so that they can
+ // be used independently of each other, for example by adding an extra
+ // command-line arg --disable-ipp so that CB and IPP can be toggled
+ // without needing to toggle both of them together.
+ IPP = std::unique_ptr<mca::InstrPostProcess>(
+ TheTarget->createInstrPostProcess(*STI, *MCII));
+ }
+ if (!IPP) {
+ // If the target doesn't have its own IPP implemented (or the -disable-cb
+ // flag is set) then we use the base class (which does nothing).
+ IPP = std::make_unique<mca::InstrPostProcess>(*STI, *MCII);
+ }
+
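For context, TheTarget->createInstrPostProcess() only returns a non-null pointer if the target has registered a constructor with the TargetRegistry. A minimal sketch, assuming the hypothetical MyTargetInstrPostProcess from above and the usual MCA registration hook (check that RegisterInstrPostProcess exists in your tree):

static mca::InstrPostProcess *
createMyTargetInstrPostProcess(const MCSubtargetInfo &STI,
                               const MCInstrInfo &MCII) {
  return new MyTargetInstrPostProcess(STI, MCII);
}

extern "C" void LLVMInitializeMyTargetTargetMCA() {
  // getTheMyTargetTarget() is a placeholder for the target's Target accessor.
  TargetRegistry::RegisterInstrPostProcess(getTheMyTargetTarget(),
                                           createMyTargetInstrPostProcess);
}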
// Create an instruction builder.
mca::InstrBuilder IB(*STI, *MCII, *MRI, MCIA.get());
ArrayRef<MCInst> Insts = Region->getInstructions();
mca::CodeEmitter CE(*STI, *MAB, *MCE, Insts);
- std::unique_ptr<mca::InstrPostProcess> IPP;
- if (!DisableCustomBehaviour) {
- IPP = std::unique_ptr<mca::InstrPostProcess>(
- TheTarget->createInstrPostProcess(*STI, *MCII));
- }
- if (!IPP)
- // If the target doesn't have its own IPP implemented (or the
- // -disable-cb flag is set) then we use the base class
- // (which does nothing).
- IPP = std::make_unique<mca::InstrPostProcess>(*STI, *MCII);
+ IPP->resetState();
SmallVector<std::unique_ptr<mca::Instruction>> LoweredSequence;
for (const MCInst &MCI : Insts) {