// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/scheduler.h"

#include "src/bit-vector.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/control-equivalence.h"
#include "src/compiler/graph.h"
#include "src/compiler/node.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/node-properties.h"
#include "src/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {
static inline void Trace(const char* msg, ...) {
  if (FLAG_trace_turbo_scheduler) {
    va_list arguments;
    va_start(arguments, msg);
    base::OS::VPrint(msg, arguments);
    va_end(arguments);
  }
}
Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags)
    : zone_(zone),
      graph_(graph),
      schedule_(schedule),
      flags_(flags),
      scheduled_nodes_(zone),
      schedule_root_nodes_(zone),
      schedule_queue_(zone),
      node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone) {}
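
// Entry point to the scheduling pass: allocates the Schedule and runs the
// phases below (CFG construction, special RPO, dominator tree, use counts,
// schedule-early, schedule-late, sealing) in order.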
Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) {
  Schedule* schedule = new (graph->zone())
      Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
  Scheduler scheduler(zone, graph, schedule, flags);

  scheduler.BuildCFG();
  scheduler.ComputeSpecialRPONumbering();
  scheduler.GenerateImmediateDominatorTree();

  scheduler.PrepareUses();
  scheduler.ScheduleEarly();
  scheduler.ScheduleLate();

  scheduler.SealFinalSchedule();

  return schedule;
}
Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
  SchedulerData def = {schedule_->start(), 0, kUnknown};
  return def;
}


Scheduler::SchedulerData* Scheduler::GetData(Node* node) {
  DCHECK(node->id() < static_cast<int>(node_data_.size()));
  return &node_data_[node->id()];
}
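
// Placement values form a small lattice that only moves forward: kUnknown
// becomes kFixed, kCoupled or kSchedulable once computed on demand below;
// later, UpdatePlacement moves coupled phis to kFixed (when their control
// node is placed) and schedulable nodes to kScheduled.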
Scheduler::Placement Scheduler::GetPlacement(Node* node) {
  SchedulerData* data = GetData(node);
  if (data->placement_ == kUnknown) {  // Compute placement, once, on demand.
    switch (node->opcode()) {
      case IrOpcode::kParameter:
      case IrOpcode::kOsrValue:
        // Parameters and OSR values are always fixed to the start block.
        data->placement_ = kFixed;
        break;
      case IrOpcode::kPhi:
      case IrOpcode::kEffectPhi: {
        // Phis and effect phis are fixed if their control inputs are, whereas
        // otherwise they are coupled to a floating control node.
        Placement p = GetPlacement(NodeProperties::GetControlInput(node));
        data->placement_ = (p == kFixed ? kFixed : kCoupled);
        break;
      }
#define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
      CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
#undef DEFINE_CONTROL_CASE
      {
        // Control nodes that were not control-reachable from end may float.
        data->placement_ = kSchedulable;
        break;
      }
      default:
        data->placement_ = kSchedulable;
        break;
    }
  }
  return data->placement_;
}
void Scheduler::UpdatePlacement(Node* node, Placement placement) {
  SchedulerData* data = GetData(node);
  if (data->placement_ != kUnknown) {  // Trap on mutation, not initialization.
    switch (node->opcode()) {
      case IrOpcode::kParameter:
        // Parameters are fixed once and for all.
        UNREACHABLE();
        break;
      case IrOpcode::kPhi:
      case IrOpcode::kEffectPhi: {
        // Phis and effect phis are coupled to their respective blocks.
        DCHECK_EQ(Scheduler::kCoupled, data->placement_);
        DCHECK_EQ(Scheduler::kFixed, placement);
        Node* control = NodeProperties::GetControlInput(node);
        BasicBlock* block = schedule_->block(control);
        schedule_->AddNode(block, node);
        break;
      }
#define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
      CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
#undef DEFINE_CONTROL_CASE
      {
        // Control nodes force coupled uses to be placed.
        for (auto use : node->uses()) {
          if (GetPlacement(use) == Scheduler::kCoupled) {
            DCHECK_EQ(node, NodeProperties::GetControlInput(use));
            UpdatePlacement(use, placement);
          }
        }
        break;
      }
      default:
        DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
        DCHECK_EQ(Scheduler::kScheduled, placement);
        break;
    }
    // Reduce the use count of the node's inputs to potentially make them
    // schedulable. If all the uses of a node have been scheduled, then the node
    // itself can be scheduled.
    for (Edge const edge : node->input_edges()) {
      DecrementUnscheduledUseCount(edge.to(), edge.index(), edge.from());
    }
  }
  data->placement_ = placement;
}
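
// Returns true if the input edge {index} of {node} is the control input of a
// coupled phi; such edges are excluded from use counting below.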
bool Scheduler::IsCoupledControlEdge(Node* node, int index) {
  return GetPlacement(node) == kCoupled &&
         NodeProperties::FirstControlIndex(node) == index;
}


void Scheduler::IncrementUnscheduledUseCount(Node* node, int index,
                                             Node* from) {
  // Make sure that control edges from coupled nodes are not counted.
  if (IsCoupledControlEdge(from, index)) return;

  // Tracking use counts for fixed nodes is useless.
  if (GetPlacement(node) == kFixed) return;

  // Use count for coupled nodes is summed up on their control.
  if (GetPlacement(node) == kCoupled) {
    Node* control = NodeProperties::GetControlInput(node);
    return IncrementUnscheduledUseCount(control, index, from);
  }

  ++(GetData(node)->unscheduled_count_);
  if (FLAG_trace_turbo_scheduler) {
    Trace("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(),
          node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
          GetData(node)->unscheduled_count_);
  }
}
void Scheduler::DecrementUnscheduledUseCount(Node* node, int index,
                                             Node* from) {
  // Make sure that control edges from coupled nodes are not counted.
  if (IsCoupledControlEdge(from, index)) return;

  // Tracking use counts for fixed nodes is useless.
  if (GetPlacement(node) == kFixed) return;

  // Use count for coupled nodes is summed up on their control.
  if (GetPlacement(node) == kCoupled) {
    Node* control = NodeProperties::GetControlInput(node);
    return DecrementUnscheduledUseCount(control, index, from);
  }

  DCHECK(GetData(node)->unscheduled_count_ > 0);
  --(GetData(node)->unscheduled_count_);
  if (FLAG_trace_turbo_scheduler) {
    Trace("  Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(),
          node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
          GetData(node)->unscheduled_count_);
  }
  if (GetData(node)->unscheduled_count_ == 0) {
    Trace("  newly eligible #%d:%s\n", node->id(), node->op()->mnemonic());
    schedule_queue_.push(node);
  }
}
// -----------------------------------------------------------------------------
// Phase 1: Build control-flow graph.


// Internal class to build a control flow graph (i.e. the basic blocks and
// edges between them within a Schedule) from the node graph. Visits control
// edges of the graph backwards from an end node in order to find the connected
// control subgraph, needed for scheduling.
class CFGBuilder : public ZoneObject {
 public:
  CFGBuilder(Zone* zone, Scheduler* scheduler)
      : zone_(zone),
        scheduler_(scheduler),
        schedule_(scheduler->schedule_),
        queued_(scheduler->graph_, 2),
        queue_(zone),
        control_(zone),
        component_entry_(NULL),
        component_start_(NULL),
        component_end_(NULL) {}
  // Run the control flow graph construction algorithm by walking the graph
  // backwards from end through control edges, building and connecting the
  // basic blocks for control nodes.
  void Run() {
    ResetDataStructures();
    Queue(scheduler_->graph_->end());

    while (!queue_.empty()) {  // Breadth-first backwards traversal.
      Node* node = queue_.front();
      queue_.pop();
      int max = NodeProperties::PastControlIndex(node);
      for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
        Queue(node->InputAt(i));
      }
    }

    for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
      ConnectBlocks(*i);  // Connect block to its predecessor/successors.
    }
  }
  // Run the control flow graph construction for a minimal control-connected
  // component ending in {exit} and merge that component into an existing
  // control flow graph at the bottom of {block}.
  void Run(BasicBlock* block, Node* exit) {
    ResetDataStructures();
    Queue(exit);

    component_entry_ = NULL;
    component_start_ = block;
    component_end_ = schedule_->block(exit);
    scheduler_->equivalence_->Run(exit);
    while (!queue_.empty()) {  // Breadth-first backwards traversal.
      Node* node = queue_.front();
      queue_.pop();

      // Use control dependence equivalence to find a canonical single-entry
      // single-exit region that makes up a minimal component to be scheduled.
      if (IsSingleEntrySingleExitRegion(node, exit)) {
        Trace("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic());
        DCHECK(!component_entry_);
        component_entry_ = node;
        continue;
      }

      int max = NodeProperties::PastControlIndex(node);
      for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
        Queue(node->InputAt(i));
      }
    }
    DCHECK(component_entry_);

    for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
      ConnectBlocks(*i);  // Connect block to its predecessor/successors.
    }
  }
 private:
  // TODO(mstarzinger): Only for Scheduler::FuseFloatingControl.
  friend class Scheduler;

  void FixNode(BasicBlock* block, Node* node) {
    schedule_->AddNode(block, node);
    scheduler_->UpdatePlacement(node, Scheduler::kFixed);
  }
  void Queue(Node* node) {
    // Mark the connected control nodes as they are queued.
    if (!queued_.Get(node)) {
      BuildBlocks(node);
      queue_.push(node);
      queued_.Set(node, true);
      control_.push_back(node);
    }
  }
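
  // Creates the basic block for a control node when it is first queued: end
  // and start are fixed to the schedule's end and start blocks, merges and
  // loops get a block of their own, and branches and switches get one block
  // per control projection (successor).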
  void BuildBlocks(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kEnd:
        FixNode(schedule_->end(), node);
        break;
      case IrOpcode::kStart:
        FixNode(schedule_->start(), node);
        break;
      case IrOpcode::kLoop:
      case IrOpcode::kMerge:
        BuildBlockForNode(node);
        break;
      case IrOpcode::kBranch:
      case IrOpcode::kSwitch:
        BuildBlocksForSuccessors(node);
        break;
      default:
        break;
    }
  }
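
  // Wires a control node into the control-flow graph by connecting its block
  // to predecessor and successor blocks; branch, switch, return and throw
  // nodes are additionally fixed to their blocks at this point.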
  void ConnectBlocks(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kLoop:
      case IrOpcode::kMerge:
        ConnectMerge(node);
        break;
      case IrOpcode::kBranch:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectBranch(node);
        break;
      case IrOpcode::kSwitch:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectSwitch(node);
        break;
      case IrOpcode::kReturn:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectReturn(node);
        break;
      case IrOpcode::kThrow:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectThrow(node);
        break;
      default:
        break;
    }
  }
  BasicBlock* BuildBlockForNode(Node* node) {
    BasicBlock* block = schedule_->block(node);
    if (block == NULL) {
      block = schedule_->NewBasicBlock();
      Trace("Create block B%d for #%d:%s\n", block->id().ToInt(), node->id(),
            node->op()->mnemonic());
      FixNode(block, node);
    }
    return block;
  }
  void BuildBlocksForSuccessors(Node* node) {
    size_t const successor_cnt = node->op()->ControlOutputCount();
    Node** successors = zone_->NewArray<Node*>(successor_cnt);
    NodeProperties::CollectControlProjections(node, successors, successor_cnt);
    for (size_t index = 0; index < successor_cnt; ++index) {
      BuildBlockForNode(successors[index]);
    }
  }
  void CollectSuccessorBlocks(Node* node, BasicBlock** successor_blocks,
                              size_t successor_cnt) {
    Node** successors = reinterpret_cast<Node**>(successor_blocks);
    NodeProperties::CollectControlProjections(node, successors, successor_cnt);
    for (size_t index = 0; index < successor_cnt; ++index) {
      successor_blocks[index] = schedule_->block(successors[index]);
    }
  }
  void ConnectBranch(Node* branch) {
    BasicBlock* successor_blocks[2];
    CollectSuccessorBlocks(branch, successor_blocks,
                           arraysize(successor_blocks));

    // Consider branch hints.
    switch (BranchHintOf(branch->op())) {
      case BranchHint::kNone:
        break;
      case BranchHint::kTrue:
        successor_blocks[1]->set_deferred(true);
        break;
      case BranchHint::kFalse:
        successor_blocks[0]->set_deferred(true);
        break;
    }

    if (branch == component_entry_) {
      TraceConnect(branch, component_start_, successor_blocks[0]);
      TraceConnect(branch, component_start_, successor_blocks[1]);
      schedule_->InsertBranch(component_start_, component_end_, branch,
                              successor_blocks[0], successor_blocks[1]);
    } else {
      Node* branch_block_node = NodeProperties::GetControlInput(branch);
      BasicBlock* branch_block = schedule_->block(branch_block_node);
      DCHECK_NOT_NULL(branch_block);

      TraceConnect(branch, branch_block, successor_blocks[0]);
      TraceConnect(branch, branch_block, successor_blocks[1]);
      schedule_->AddBranch(branch_block, branch, successor_blocks[0],
                           successor_blocks[1]);
    }
  }
  void ConnectSwitch(Node* sw) {
    size_t const successor_count = sw->op()->ControlOutputCount();
    BasicBlock** successor_blocks =
        zone_->NewArray<BasicBlock*>(successor_count);
    CollectSuccessorBlocks(sw, successor_blocks, successor_count);

    if (sw == component_entry_) {
      for (size_t index = 0; index < successor_count; ++index) {
        TraceConnect(sw, component_start_, successor_blocks[index]);
      }
      schedule_->InsertSwitch(component_start_, component_end_, sw,
                              successor_blocks, successor_count);
    } else {
      Node* sw_block_node = NodeProperties::GetControlInput(sw);
      BasicBlock* sw_block = schedule_->block(sw_block_node);
      DCHECK_NOT_NULL(sw_block);

      for (size_t index = 0; index < successor_count; ++index) {
        TraceConnect(sw, sw_block, successor_blocks[index]);
      }
      schedule_->AddSwitch(sw_block, sw, successor_blocks, successor_count);
    }
  }
  void ConnectMerge(Node* merge) {
    // Don't connect the special merge at the end to its predecessors.
    if (IsFinalMerge(merge)) return;

    BasicBlock* block = schedule_->block(merge);
    DCHECK_NOT_NULL(block);
    // For all of the merge's control inputs, add a goto at the end to the
    // merge's basic block.
    for (Node* const input : merge->inputs()) {
      BasicBlock* predecessor_block = schedule_->block(input);
      TraceConnect(merge, predecessor_block, block);
      schedule_->AddGoto(predecessor_block, block);
    }
  }
  void ConnectReturn(Node* ret) {
    Node* return_block_node = NodeProperties::GetControlInput(ret);
    BasicBlock* return_block = schedule_->block(return_block_node);
    TraceConnect(ret, return_block, NULL);
    schedule_->AddReturn(return_block, ret);
  }

  void ConnectThrow(Node* thr) {
    Node* throw_block_node = NodeProperties::GetControlInput(thr);
    BasicBlock* throw_block = schedule_->block(throw_block_node);
    TraceConnect(thr, throw_block, NULL);
    schedule_->AddThrow(throw_block, thr);
  }
  void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
    DCHECK_NOT_NULL(block);
    if (succ == NULL) {
      Trace("Connect #%d:%s, B%d -> end\n", node->id(), node->op()->mnemonic(),
            block->id().ToInt());
    } else {
      Trace("Connect #%d:%s, B%d -> B%d\n", node->id(), node->op()->mnemonic(),
            block->id().ToInt(), succ->id().ToInt());
    }
  }
  bool IsFinalMerge(Node* node) {
    return (node->opcode() == IrOpcode::kMerge &&
            node == scheduler_->graph_->end()->InputAt(0));
  }

  bool IsSingleEntrySingleExitRegion(Node* entry, Node* exit) const {
    size_t entry_class = scheduler_->equivalence_->ClassOf(entry);
    size_t exit_class = scheduler_->equivalence_->ClassOf(exit);
    return entry != exit && entry_class == exit_class;
  }
  void ResetDataStructures() {
    control_.clear();
    DCHECK(queue_.empty());
    DCHECK(control_.empty());
  }
  Zone* zone_;
  Scheduler* scheduler_;
  Schedule* schedule_;
  NodeMarker<bool> queued_;      // Mark indicating whether node is queued.
  ZoneQueue<Node*> queue_;       // Queue used for breadth-first traversal.
  NodeVector control_;           // List of encountered control nodes.
  Node* component_entry_;        // Component single-entry node.
  BasicBlock* component_start_;  // Component single-entry block.
  BasicBlock* component_end_;    // Component single-exit block.
};
void Scheduler::BuildCFG() {
  Trace("--- CREATING CFG -------------------------------------------\n");

  // Instantiate a new control equivalence algorithm for the graph.
  equivalence_ = new (zone_) ControlEquivalence(zone_, graph_);

  // Build a control-flow graph for the main control-connected component that
  // is being spanned by the graph's start and end nodes.
  control_flow_builder_ = new (zone_) CFGBuilder(zone_, this);
  control_flow_builder_->Run();

  // Initialize per-block data.
  scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
}
// -----------------------------------------------------------------------------
// Phase 2: Compute special RPO and dominator tree.
// Compute the special reverse-post-order block ordering, which is essentially
// a RPO of the graph where loop bodies are contiguous. Properties:
// 1. If block A is a predecessor of B, then A appears before B in the order,
//    unless B is a loop header and A is in the loop headed at B
//    (i.e. A -> B is a backedge).
// => If block A dominates block B, then A appears before B in the order.
// => If block A is a loop header, A appears before all blocks in the loop
//    headed at A.
// 2. All loops are contiguous in the order (i.e. no intervening blocks that
//    do not belong to the loop.)
// Note a simple RPO traversal satisfies (1) but not (2).
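// For example, consider a loop {B1, B2} with entry B0 and exit B3, where
// B1 -> B2, B2 -> B1 (the backedge) and B1 -> B3. A plain DFS-based RPO may
// produce [B0, B1, B3, B2], interleaving B3 with the loop, whereas the
// special RPO always yields [B0, B1, B2, B3], keeping the loop contiguous.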
class SpecialRPONumberer : public ZoneObject {
 public:
  SpecialRPONumberer(Zone* zone, Schedule* schedule)
      : zone_(zone),
        schedule_(schedule),
        order_(NULL),
        beyond_end_(NULL),
        loops_(zone),
        backedges_(zone),
        stack_(zone),
        previous_block_count_(0) {}
  // Computes the special reverse-post-order for the main control flow graph,
  // that is for the graph spanned between the schedule's start and end blocks.
  void ComputeSpecialRPO() {
    DCHECK(schedule_->end()->SuccessorCount() == 0);
    DCHECK(!order_);  // Main order does not exist yet.
    ComputeAndInsertSpecialRPO(schedule_->start(), schedule_->end());
  }
  // Computes the special reverse-post-order for a partial control flow graph,
  // that is for the graph spanned between the given {entry} and {end} blocks,
  // then updates the existing ordering with this new information.
  void UpdateSpecialRPO(BasicBlock* entry, BasicBlock* end) {
    DCHECK(order_);  // Main order to be updated is present.
    ComputeAndInsertSpecialRPO(entry, end);
  }
  // Serialize the previously computed order as a special reverse-post-order
  // numbering for basic blocks into the final schedule.
  void SerializeRPOIntoSchedule() {
    int32_t number = 0;
    for (BasicBlock* b = order_; b != NULL; b = b->rpo_next()) {
      b->set_rpo_number(number++);
      schedule_->rpo_order()->push_back(b);
    }
    BeyondEndSentinel()->set_rpo_number(number);
  }
  // Print and verify the special reverse-post-order.
  void PrintAndVerifySpecialRPO() {
#if DEBUG
    if (FLAG_trace_turbo_scheduler) PrintRPO();
    VerifySpecialRPO();
#endif
  }
 private:
  typedef std::pair<BasicBlock*, size_t> Backedge;

  // Numbering for BasicBlock::rpo_number for this block traversal:
  static const int kBlockOnStack = -2;
  static const int kBlockVisited1 = -3;
  static const int kBlockVisited2 = -4;
  static const int kBlockUnvisited1 = -1;
  static const int kBlockUnvisited2 = kBlockVisited1;
  struct SpecialRPOStackFrame {
    BasicBlock* block;
    size_t index;
  };

  struct LoopInfo {
    BasicBlock* header;
    ZoneList<BasicBlock*>* outgoing;
    BitVector* members;
    LoopInfo* prev;
    BasicBlock* end;
    BasicBlock* start;

    void AddOutgoing(Zone* zone, BasicBlock* block) {
      if (outgoing == NULL) {
        outgoing = new (zone) ZoneList<BasicBlock*>(2, zone);
      }
      outgoing->Add(block, zone);
    }
  };
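
  // Pushes {child} onto the traversal stack at {depth} if it is still
  // unvisited, marking it as on-stack; returns the new stack depth.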
  int Push(ZoneVector<SpecialRPOStackFrame>& stack, int depth,
           BasicBlock* child, int unvisited) {
    if (child->rpo_number() == unvisited) {
      stack[depth].block = child;
      stack[depth].index = 0;
      child->set_rpo_number(kBlockOnStack);
      return depth + 1;
    }
    return depth;
  }
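
  // Prepends {block} to the intrusive rpo_next list headed by {head} and
  // returns the new list head.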
  BasicBlock* PushFront(BasicBlock* head, BasicBlock* block) {
    block->set_rpo_next(head);
    return block;
  }
  static int GetLoopNumber(BasicBlock* block) { return block->loop_number(); }
  static void SetLoopNumber(BasicBlock* block, int loop_number) {
    return block->set_loop_number(loop_number);
  }
  static bool HasLoopNumber(BasicBlock* block) {
    return block->loop_number() >= 0;
  }
  // TODO(mstarzinger): We only need this special sentinel because some tests
  // use the schedule's end block in actual control flow (e.g. with end having
  // successors). Once this has been cleaned up we can use the end block here.
  BasicBlock* BeyondEndSentinel() {
    if (beyond_end_ == NULL) {
      BasicBlock::Id id = BasicBlock::Id::FromInt(-1);
      beyond_end_ = new (schedule_->zone()) BasicBlock(schedule_->zone(), id);
    }
    return beyond_end_;
  }
  // Compute special RPO for the control flow graph between {entry} and {end},
  // mutating any existing order so that the result is still valid.
  void ComputeAndInsertSpecialRPO(BasicBlock* entry, BasicBlock* end) {
    // RPO should not have been serialized for this schedule yet.
    CHECK_EQ(kBlockUnvisited1, schedule_->start()->loop_number());
    CHECK_EQ(kBlockUnvisited1, schedule_->start()->rpo_number());
    CHECK_EQ(0, static_cast<int>(schedule_->rpo_order()->size()));

    // Find correct insertion point within existing order.
    BasicBlock* insertion_point = entry->rpo_next();
    BasicBlock* order = insertion_point;

    // Perform an iterative RPO traversal using an explicit stack,
    // recording backedges that form cycles. O(|B|).
    DCHECK_LT(previous_block_count_, schedule_->BasicBlockCount());
    stack_.resize(schedule_->BasicBlockCount() - previous_block_count_);
    previous_block_count_ = schedule_->BasicBlockCount();
    int stack_depth = Push(stack_, 0, entry, kBlockUnvisited1);
    int num_loops = static_cast<int>(loops_.size());
    while (stack_depth > 0) {
      int current = stack_depth - 1;
      SpecialRPOStackFrame* frame = &stack_[current];

      if (frame->block != end &&
          frame->index < frame->block->SuccessorCount()) {
        // Process the next successor.
        BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
        if (succ->rpo_number() == kBlockVisited1) continue;
        if (succ->rpo_number() == kBlockOnStack) {
          // The successor is on the stack, so this is a backedge (cycle).
          backedges_.push_back(Backedge(frame->block, frame->index - 1));
          if (!HasLoopNumber(succ)) {
            // Assign a new loop number to the header if it doesn't have one.
            SetLoopNumber(succ, num_loops++);
          }
        } else {
          // Push the successor onto the stack.
          DCHECK(succ->rpo_number() == kBlockUnvisited1);
          stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited1);
        }
      } else {
        // Finished with all successors; pop the stack and add the block.
        order = PushFront(order, frame->block);
        frame->block->set_rpo_number(kBlockVisited1);
        stack_depth--;
      }
    }
    // If no loops were encountered, then the order we computed was correct.
    if (num_loops > static_cast<int>(loops_.size())) {
      // Otherwise, compute the loop information from the backedges in order
      // to perform a traversal that groups loop bodies together.
      ComputeLoopInfo(stack_, num_loops, &backedges_);

      // Initialize the "loop stack". Note the entry could be a loop header.
      LoopInfo* loop =
          HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : NULL;
      order = insertion_point;
      // Perform an iterative post-order traversal, visiting loop bodies before
      // edges that lead out of loops. Visits each block once, but linking loop
      // sections together is linear in the loop size, so overall is
      // O(|B| + max(loop_depth) * max(|loop|))
      stack_depth = Push(stack_, 0, entry, kBlockUnvisited2);
      while (stack_depth > 0) {
        SpecialRPOStackFrame* frame = &stack_[stack_depth - 1];
        BasicBlock* block = frame->block;
        BasicBlock* succ = NULL;

        if (block != end && frame->index < block->SuccessorCount()) {
          // Process the next normal successor.
          succ = block->SuccessorAt(frame->index++);
        } else if (HasLoopNumber(block)) {
          // Process additional outgoing edges from the loop header.
          if (block->rpo_number() == kBlockOnStack) {
            // Finish the loop body the first time the header is left on the
            // stack.
            DCHECK(loop != NULL && loop->header == block);
            loop->start = PushFront(order, block);
            order = loop->end;
            block->set_rpo_number(kBlockVisited2);
            // Pop the loop stack and continue visiting outgoing edges within
            // the context of the outer loop, if any.
            loop = loop->prev;
            // We leave the loop header on the stack; the rest of this
            // iteration and later iterations will go through its outgoing
            // edges list.
          }

          // Use the next outgoing edge if there are any.
          int outgoing_index =
              static_cast<int>(frame->index - block->SuccessorCount());
          LoopInfo* info = &loops_[GetLoopNumber(block)];
          DCHECK(loop != info);
          if (block != entry && info->outgoing != NULL &&
              outgoing_index < info->outgoing->length()) {
            succ = info->outgoing->at(outgoing_index);
            frame->index++;
          }
        }
        if (succ != NULL) {
          // Process the next successor.
          if (succ->rpo_number() == kBlockOnStack) continue;
          if (succ->rpo_number() == kBlockVisited2) continue;
          DCHECK(succ->rpo_number() == kBlockUnvisited2);
          if (loop != NULL && !loop->members->Contains(succ->id().ToInt())) {
            // The successor is not in the current loop or any nested loop.
            // Add it to the outgoing edges of this loop and visit it later.
            loop->AddOutgoing(zone_, succ);
          } else {
            // Push the successor onto the stack.
            stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited2);
            if (HasLoopNumber(succ)) {
              // Push the inner loop onto the loop stack.
              DCHECK(GetLoopNumber(succ) < num_loops);
              LoopInfo* next = &loops_[GetLoopNumber(succ)];
              next->end = order;
              next->prev = loop;
              loop = next;
            }
          }
        } else {
          // Finished with all successors of the current block.
          if (HasLoopNumber(block)) {
            // If we are going to pop a loop header, then add its entire body.
            LoopInfo* info = &loops_[GetLoopNumber(block)];
            for (BasicBlock* b = info->start; true; b = b->rpo_next()) {
              if (b->rpo_next() == info->end) {
                b->set_rpo_next(order);
                info->end = order;
                break;
              }
            }
            order = info->start;
          } else {
            // Pop a single node off the stack and add it to the order.
            order = PushFront(order, block);
            block->set_rpo_number(kBlockVisited2);
          }
          stack_depth--;
        }
      }
    }
    // Publish new order the first time.
    if (order_ == NULL) order_ = order;
    // Compute the correct loop headers and set the correct loop ends.
    LoopInfo* current_loop = NULL;
    BasicBlock* current_header = entry->loop_header();
    int32_t loop_depth = entry->loop_depth();
    if (entry->IsLoopHeader()) --loop_depth;  // Entry might be a loop header.
    for (BasicBlock* b = order; b != insertion_point; b = b->rpo_next()) {
      BasicBlock* current = b;

      // Reset BasicBlock::rpo_number again.
      current->set_rpo_number(kBlockUnvisited1);

      // Finish the previous loop(s) if we just exited them.
      while (current_header != NULL && current == current_header->loop_end()) {
        DCHECK(current_header->IsLoopHeader());
        DCHECK(current_loop != NULL);
        current_loop = current_loop->prev;
        current_header = current_loop == NULL ? NULL : current_loop->header;
        --loop_depth;
      }
      current->set_loop_header(current_header);

      // Push a new loop onto the stack if this loop is a loop header.
      if (HasLoopNumber(current)) {
        ++loop_depth;
        current_loop = &loops_[GetLoopNumber(current)];
        BasicBlock* end = current_loop->end;
        current->set_loop_end(end == NULL ? BeyondEndSentinel() : end);
        current_header = current_loop->header;
        Trace("B%d is a loop header, increment loop depth to %d\n",
              current->id().ToInt(), loop_depth);
      }

      current->set_loop_depth(loop_depth);

      if (current->loop_header() == NULL) {
        Trace("B%d is not in a loop (depth == %d)\n", current->id().ToInt(),
              current->loop_depth());
      } else {
        Trace("B%d has loop header B%d, (depth == %d)\n", current->id().ToInt(),
              current->loop_header()->id().ToInt(), current->loop_depth());
      }
    }
  }
  // Computes loop membership from the backedges of the control flow graph.
  void ComputeLoopInfo(ZoneVector<SpecialRPOStackFrame>& queue,
                       size_t num_loops, ZoneVector<Backedge>* backedges) {
    // Extend existing loop membership vectors.
    for (LoopInfo& loop : loops_) {
      BitVector* new_members = new (zone_)
          BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
      new_members->CopyFrom(*loop.members);
      loop.members = new_members;
    }

    // Extend loop information vector.
    loops_.resize(num_loops, LoopInfo());

    // Compute loop membership starting from backedges.
    // O(max(loop_depth) * max(|loop|))
    for (size_t i = 0; i < backedges->size(); i++) {
      BasicBlock* member = backedges->at(i).first;
      BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
      size_t loop_num = GetLoopNumber(header);
      if (loops_[loop_num].header == NULL) {
        loops_[loop_num].header = header;
        loops_[loop_num].members = new (zone_)
            BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
      }

      int queue_length = 0;
      if (member != header) {
        // As long as the header doesn't have a backedge to itself, push the
        // member onto the queue and process its predecessors.
        if (!loops_[loop_num].members->Contains(member->id().ToInt())) {
          loops_[loop_num].members->Add(member->id().ToInt());
        }
        queue[queue_length++].block = member;
      }

      // Propagate loop membership backwards. All predecessors of M up to the
      // loop header H are members of the loop too. O(|blocks between M and H|).
      while (queue_length > 0) {
        BasicBlock* block = queue[--queue_length].block;
        for (size_t i = 0; i < block->PredecessorCount(); i++) {
          BasicBlock* pred = block->PredecessorAt(i);
          if (pred != header) {
            if (!loops_[loop_num].members->Contains(pred->id().ToInt())) {
              loops_[loop_num].members->Add(pred->id().ToInt());
              queue[queue_length++].block = pred;
            }
          }
        }
      }
    }
  }
903 os << "RPO with " << loops_.size() << " loops";
904 if (loops_.size() > 0) {
906 for (size_t i = 0; i < loops_.size(); i++) {
907 if (i > 0) os << " ";
908 os << "B" << loops_[i].header->id();
914 for (BasicBlock* block = order_; block != NULL; block = block->rpo_next()) {
915 BasicBlock::Id bid = block->id();
916 // TODO(jarin,svenpanne): Add formatting here once we have support for
917 // that in streams (we want an equivalent of PrintF("%5d:", x) here).
918 os << " " << block->rpo_number() << ":";
919 for (size_t i = 0; i < loops_.size(); i++) {
920 bool range = loops_[i].header->LoopContains(block);
921 bool membership = loops_[i].header != block && range;
922 os << (membership ? " |" : " ");
923 os << (range ? "x" : " ");
925 os << " B" << bid << ": ";
926 if (block->loop_end() != NULL) {
927 os << " range: [" << block->rpo_number() << ", "
928 << block->loop_end()->rpo_number() << ")";
930 if (block->loop_header() != NULL) {
931 os << " header: B" << block->loop_header()->id();
933 if (block->loop_depth() > 0) {
934 os << " depth: " << block->loop_depth();
  void VerifySpecialRPO() {
    BasicBlockVector* order = schedule_->rpo_order();
    DCHECK(order->size() > 0);
    DCHECK((*order)[0]->id().ToInt() == 0);  // entry should be first.

    for (size_t i = 0; i < loops_.size(); i++) {
      LoopInfo* loop = &loops_[i];
      BasicBlock* header = loop->header;
      BasicBlock* end = header->loop_end();

      DCHECK(header != NULL);
      DCHECK(header->rpo_number() >= 0);
      DCHECK(header->rpo_number() < static_cast<int>(order->size()));
      DCHECK(end != NULL);
      DCHECK(end->rpo_number() <= static_cast<int>(order->size()));
      DCHECK(end->rpo_number() > header->rpo_number());
      DCHECK(header->loop_header() != header);

      // Verify the start ... end list relationship.
      int links = 0;
      bool end_found = false;
      BasicBlock* block = loop->start;
      DCHECK_EQ(header, block);
      while (true) {
        if (block == NULL || block == loop->end) {
          end_found = (loop->end == block);
          break;
        }
        // The list should be in same order as the final result.
        DCHECK(block->rpo_number() == links + header->rpo_number());
        links++;
        block = block->rpo_next();
        DCHECK(links < static_cast<int>(2 * order->size()));  // cycle?
      }
      DCHECK(end_found);
      DCHECK(links == end->rpo_number() - header->rpo_number());

      // Check loop depth of the header.
      int loop_depth = 0;
      for (LoopInfo* outer = loop; outer != NULL; outer = outer->prev) {
        loop_depth++;
      }
      DCHECK_EQ(loop_depth, header->loop_depth());

      // Check the contiguousness of loops.
      int count = 0;
      for (int j = 0; j < static_cast<int>(order->size()); j++) {
        BasicBlock* block = order->at(j);
        DCHECK(block->rpo_number() == j);
        if (j < header->rpo_number() || j >= end->rpo_number()) {
          DCHECK(!header->LoopContains(block));
        } else {
          DCHECK(header->LoopContains(block));
          DCHECK_GE(block->loop_depth(), loop_depth);
          count++;
        }
      }
      DCHECK(links == count);
    }
  }
  Zone* zone_;
  Schedule* schedule_;
  BasicBlock* order_;
  BasicBlock* beyond_end_;
  ZoneVector<LoopInfo> loops_;
  ZoneVector<Backedge> backedges_;
  ZoneVector<SpecialRPOStackFrame> stack_;
  size_t previous_block_count_;
};
BasicBlockVector* Scheduler::ComputeSpecialRPO(Zone* zone, Schedule* schedule) {
  SpecialRPONumberer numberer(zone, schedule);
  numberer.ComputeSpecialRPO();
  numberer.SerializeRPOIntoSchedule();
  numberer.PrintAndVerifySpecialRPO();
  return schedule->rpo_order();
}
void Scheduler::ComputeSpecialRPONumbering() {
  Trace("--- COMPUTING SPECIAL RPO ----------------------------------\n");

  // Compute the special reverse-post-order for basic blocks.
  special_rpo_ = new (zone_) SpecialRPONumberer(zone_, schedule_);
  special_rpo_->ComputeSpecialRPO();
}
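
// Walks the blocks in RPO order starting at {block} and sets each block's
// immediate dominator to the common dominator of all already-visited
// predecessors. Backedge predecessors are not yet visited and can be
// skipped, since a loop header dominates the tails of its backedges.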
void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
  for (/*nop*/; block != NULL; block = block->rpo_next()) {
    auto pred = block->predecessors().begin();
    auto end = block->predecessors().end();
    DCHECK(pred != end);  // All blocks except start have predecessors.
    BasicBlock* dominator = *pred;
    // For multiple predecessors, walk up the dominator tree until a common
    // dominator is found. Visitation order guarantees that all predecessors
    // except for backwards edges have been visited.
    for (++pred; pred != end; ++pred) {
      // Don't examine backwards edges.
      if ((*pred)->dominator_depth() < 0) continue;
      dominator = BasicBlock::GetCommonDominator(dominator, *pred);
    }
    block->set_dominator(dominator);
    block->set_dominator_depth(dominator->dominator_depth() + 1);
    // Propagate "deferredness" of the dominator.
    if (dominator->deferred()) block->set_deferred(true);
    Trace("Block B%d's idom is B%d, depth = %d\n", block->id().ToInt(),
          dominator->id().ToInt(), block->dominator_depth());
  }
}
void Scheduler::GenerateImmediateDominatorTree() {
  Trace("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");

  // Seed start block to be the first dominator.
  schedule_->start()->set_dominator_depth(0);

  // Build the block dominator tree resulting from the above seed.
  PropagateImmediateDominators(schedule_->start()->rpo_next());
}
// -----------------------------------------------------------------------------
// Phase 3: Prepare use counts for nodes.
class PrepareUsesVisitor {
 public:
  explicit PrepareUsesVisitor(Scheduler* scheduler)
      : scheduler_(scheduler), schedule_(scheduler->schedule_) {}

  void Pre(Node* node) {
    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
      // Fixed nodes are always roots for schedule late.
      scheduler_->schedule_root_nodes_.push_back(node);
      if (!schedule_->IsScheduled(node)) {
        // Make sure root nodes are scheduled in their respective blocks.
        Trace("Scheduling fixed position node #%d:%s\n", node->id(),
              node->op()->mnemonic());
        IrOpcode::Value opcode = node->opcode();
        BasicBlock* block =
            opcode == IrOpcode::kParameter
                ? schedule_->start()
                : schedule_->block(NodeProperties::GetControlInput(node));
        DCHECK(block != NULL);
        schedule_->AddNode(block, node);
      }
    }
  }
  void PostEdge(Node* from, int index, Node* to) {
    // If the edge is from an unscheduled node, then tally it in the use count
    // for all of its inputs. The same criterion will be used in ScheduleLate
    // for decrementing use counts.
    if (!schedule_->IsScheduled(from)) {
      DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
      scheduler_->IncrementUnscheduledUseCount(to, index, from);
    }
  }

 private:
  Scheduler* scheduler_;
  Schedule* schedule_;
};
void Scheduler::PrepareUses() {
  Trace("--- PREPARE USES -------------------------------------------\n");

  // Count the uses of every node, which is used to ensure that all of a
  // node's uses are scheduled before the node itself.
  PrepareUsesVisitor prepare_uses(this);

  // TODO(turbofan): simplify the careful pre/post ordering here.
  BoolVector visited(graph_->NodeCount(), false, zone_);
  ZoneStack<Node::InputEdges::iterator> stack(zone_);
  Node* node = graph_->end();
  prepare_uses.Pre(node);
  visited[node->id()] = true;
  stack.push(node->input_edges().begin());
  while (!stack.empty()) {
    Edge edge = *stack.top();
    Node* node = edge.to();
    if (visited[node->id()]) {
      prepare_uses.PostEdge(edge.from(), edge.index(), edge.to());
      if (++stack.top() == edge.from()->input_edges().end()) stack.pop();
    } else {
      prepare_uses.Pre(node);
      visited[node->id()] = true;
      if (node->InputCount() > 0) stack.push(node->input_edges().begin());
    }
  }
}
// -----------------------------------------------------------------------------
// Phase 4: Schedule nodes early.
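
// Visitor that computes the "schedule early" position of each node, i.e. the
// minimum block (deepest dominator) that is dominated by the blocks of all
// of the node's inputs. Minimum positions are propagated forward from the
// fixed roots to all of their transitive uses.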
class ScheduleEarlyNodeVisitor {
 public:
  ScheduleEarlyNodeVisitor(Zone* zone, Scheduler* scheduler)
      : scheduler_(scheduler), schedule_(scheduler->schedule_), queue_(zone) {}

  // Run the schedule early algorithm on a set of fixed root nodes.
  void Run(NodeVector* roots) {
    for (Node* const root : *roots) {
      queue_.push(root);
      while (!queue_.empty()) {
        VisitNode(queue_.front());
        queue_.pop();
      }
    }
  }
 private:
  // Visits one node from the queue and propagates its current schedule early
  // position to all uses. This in turn might push more nodes onto the queue.
  void VisitNode(Node* node) {
    Scheduler::SchedulerData* data = scheduler_->GetData(node);

    // Fixed nodes already know their schedule early position.
    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
      data->minimum_block_ = schedule_->block(node);
      Trace("Fixing #%d:%s minimum_block = B%d, dominator_depth = %d\n",
            node->id(), node->op()->mnemonic(),
            data->minimum_block_->id().ToInt(),
            data->minimum_block_->dominator_depth());
    }

    // No need to propagate unconstrained schedule early positions.
    if (data->minimum_block_ == schedule_->start()) return;

    // Propagate schedule early position.
    DCHECK(data->minimum_block_ != NULL);
    for (auto use : node->uses()) {
      PropagateMinimumPositionToNode(data->minimum_block_, use);
    }
  }
  // Propagates {block} as another minimum position into the given {node}. This
  // has the net effect of computing the minimum dominator block of {node} that
  // still post-dominates all inputs to {node} when the queue is processed.
  void PropagateMinimumPositionToNode(BasicBlock* block, Node* node) {
    Scheduler::SchedulerData* data = scheduler_->GetData(node);

    // No need to propagate to fixed node, it's guaranteed to be a root.
    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) return;

    // Coupled nodes influence schedule early position of their control.
    if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
      Node* control = NodeProperties::GetControlInput(node);
      PropagateMinimumPositionToNode(block, control);
    }

    // Propagate new position if it is deeper down the dominator tree than the
    // current. Note that all inputs need to have minimum block position inside
    // the dominator chain of {node}'s minimum block position.
    DCHECK(InsideSameDominatorChain(block, data->minimum_block_));
    if (block->dominator_depth() > data->minimum_block_->dominator_depth()) {
      data->minimum_block_ = block;
      queue_.push(node);
      Trace("Propagating #%d:%s minimum_block = B%d, dominator_depth = %d\n",
            node->id(), node->op()->mnemonic(),
            data->minimum_block_->id().ToInt(),
            data->minimum_block_->dominator_depth());
    }
  }
#if DEBUG
  bool InsideSameDominatorChain(BasicBlock* b1, BasicBlock* b2) {
    BasicBlock* dominator = BasicBlock::GetCommonDominator(b1, b2);
    return dominator == b1 || dominator == b2;
  }
#endif
  Scheduler* scheduler_;
  Schedule* schedule_;
  ZoneQueue<Node*> queue_;
};
void Scheduler::ScheduleEarly() {
  Trace("--- SCHEDULE EARLY -----------------------------------------\n");
  if (FLAG_trace_turbo_scheduler) {
    Trace("roots: ");
    for (Node* node : schedule_root_nodes_) {
      Trace("#%d:%s ", node->id(), node->op()->mnemonic());
    }
    Trace("\n");
  }

  // Compute the minimum block for each node thereby determining the earliest
  // position each node could be placed within a valid schedule.
  ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
  schedule_early_visitor.Run(&schedule_root_nodes_);
}
// -----------------------------------------------------------------------------
// Phase 5: Schedule nodes late.
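
// Visitor that places each schedulable node at its "schedule late" position:
// the common dominator of all its uses, hoisted out of enclosing loops where
// legal, and optionally split per use branch when kSplitNodes is enabled.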
class ScheduleLateNodeVisitor {
 public:
  ScheduleLateNodeVisitor(Zone* zone, Scheduler* scheduler)
      : scheduler_(scheduler),
        schedule_(scheduler_->schedule_),
        marked_(scheduler->zone_),
        marking_queue_(scheduler->zone_) {}

  // Run the schedule late algorithm on a set of fixed root nodes.
  void Run(NodeVector* roots) {
    for (Node* const root : *roots) {
      ProcessQueue(root);
    }
  }
 private:
  void ProcessQueue(Node* root) {
    ZoneQueue<Node*>* queue = &(scheduler_->schedule_queue_);
    for (Node* node : root->inputs()) {
      // Don't schedule coupled nodes on their own.
      if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
        node = NodeProperties::GetControlInput(node);
      }

      // Test schedulability condition by looking at unscheduled use count.
      if (scheduler_->GetData(node)->unscheduled_count_ != 0) continue;

      queue->push(node);
      do {
        Node* const node = queue->front();
        queue->pop();
        VisitNode(node);
      } while (!queue->empty());
    }
  }
  // Visits one node from the queue of schedulable nodes and determines its
  // schedule late position. Also hoists nodes out of loops to find a more
  // optimal scheduling position.
  void VisitNode(Node* node) {
    DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);

    // Don't schedule nodes that are already scheduled.
    if (schedule_->IsScheduled(node)) return;
    DCHECK_EQ(Scheduler::kSchedulable, scheduler_->GetPlacement(node));

    // Determine the dominating block for all of the uses of this node. It is
    // the latest block that this node can be scheduled in.
    Trace("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic());
    BasicBlock* block = GetCommonDominatorOfUses(node);
    DCHECK_NOT_NULL(block);

    // The schedule early block dominates the schedule late block.
    BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_;
    DCHECK_EQ(min_block, BasicBlock::GetCommonDominator(block, min_block));
    Trace("Schedule late of #%d:%s is B%d at loop depth %d, minimum = B%d\n",
          node->id(), node->op()->mnemonic(), block->id().ToInt(),
          block->loop_depth(), min_block->id().ToInt());
    // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
    // into enclosing loop pre-headers until they would precede their schedule
    // early position.
    BasicBlock* hoist_block = GetPreHeader(block);
    if (hoist_block &&
        hoist_block->dominator_depth() >= min_block->dominator_depth()) {
      do {
        Trace("  hoisting #%d:%s to block B%d\n", node->id(),
              node->op()->mnemonic(), hoist_block->id().ToInt());
        DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
        block = hoist_block;
        hoist_block = GetPreHeader(hoist_block);
      } while (hoist_block &&
               hoist_block->dominator_depth() >= min_block->dominator_depth());
    } else if (scheduler_->flags_ & Scheduler::kSplitNodes) {
      // Split the {node} if beneficial and return the new {block} for it.
      block = SplitNode(block, node);
    }

    // Schedule the node or a floating control structure.
    if (NodeProperties::IsControl(node)) {
      ScheduleFloatingControl(block, node);
    } else {
      ScheduleNode(block, node);
    }
  }
  // Mark {block} and push its non-marked predecessor on the marking queue.
  void MarkBlock(BasicBlock* block) {
    DCHECK_LT(block->id().ToSize(), marked_.size());
    marked_[block->id().ToSize()] = true;
    for (BasicBlock* pred_block : block->predecessors()) {
      DCHECK_LT(pred_block->id().ToSize(), marked_.size());
      if (marked_[pred_block->id().ToSize()]) continue;
      marking_queue_.push_back(pred_block);
    }
  }
  BasicBlock* SplitNode(BasicBlock* block, Node* node) {
    // For now, we limit splitting to pure nodes.
    if (!node->op()->HasProperty(Operator::kPure)) return block;

    // The {block} is common dominator of all uses of {node}, so we cannot
    // split anything unless the {block} has at least two successors.
    DCHECK_EQ(block, GetCommonDominatorOfUses(node));
    if (block->SuccessorCount() < 2) return block;

    // Clear marking bits.
    DCHECK(marking_queue_.empty());
    std::fill(marked_.begin(), marked_.end(), false);
    marked_.resize(schedule_->BasicBlockCount() + 1, false);
    // Check if the {node} has uses in {block}.
    for (Edge edge : node->use_edges()) {
      BasicBlock* use_block = GetBlockForUse(edge);
      if (use_block == nullptr || marked_[use_block->id().ToSize()]) continue;
      if (use_block == block) {
        Trace("  not splitting #%d:%s, it is used in B%d\n", node->id(),
              node->op()->mnemonic(), block->id().ToInt());
        marking_queue_.clear();
        return block;
      }
      MarkBlock(use_block);
    }
    // Compute transitive marking closure; a block is marked if all its
    // successors are marked.
    do {
      BasicBlock* top_block = marking_queue_.front();
      marking_queue_.pop_front();
      if (marked_[top_block->id().ToSize()]) continue;
      bool marked = true;
      for (BasicBlock* successor : top_block->successors()) {
        if (!marked_[successor->id().ToSize()]) {
          marked = false;
          break;
        }
      }
      if (marked) MarkBlock(top_block);
    } while (!marking_queue_.empty());
    // If the (common dominator) {block} is marked, we know that all paths from
    // {block} to the end contain at least one use of {node}, and hence there's
    // no point in splitting the {node} in this case.
    if (marked_[block->id().ToSize()]) {
      Trace("  not splitting #%d:%s, its common dominator B%d is perfect\n",
            node->id(), node->op()->mnemonic(), block->id().ToInt());
      return block;
    }
    // Split {node} for uses according to the previously computed marking
    // closure. Every marking partition has a unique dominator, which gets a
    // copy of the {node} with the exception of the first partition, which gets
    // the {node} itself.
    ZoneMap<BasicBlock*, Node*> dominators(scheduler_->zone_);
    for (Edge edge : node->use_edges()) {
      BasicBlock* use_block = GetBlockForUse(edge);
      if (use_block == nullptr) continue;
      while (marked_[use_block->dominator()->id().ToSize()]) {
        use_block = use_block->dominator();
      }
      auto& use_node = dominators[use_block];
      if (use_node == nullptr) {
        if (dominators.size() == 1u) {
          // Place the {node} at {use_block}.
          block = use_block;
          use_node = node;
          Trace("  pushing #%d:%s down to B%d\n", node->id(),
                node->op()->mnemonic(), block->id().ToInt());
        } else {
          // Place a copy of {node} at {use_block}.
          use_node = CloneNode(node);
          Trace("  cloning #%d:%s for B%d\n", use_node->id(),
                use_node->op()->mnemonic(), use_block->id().ToInt());
          scheduler_->schedule_queue_.push(use_node);
        }
      }
      edge.UpdateTo(use_node);
    }
    return block;
  }
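
  // Returns the block into which a node can be hoisted from {block}: for a
  // loop header its dominator, for any other block inside a loop the
  // dominator of its loop header, and nullptr outside of loops.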
  BasicBlock* GetPreHeader(BasicBlock* block) {
    if (block->IsLoopHeader()) {
      return block->dominator();
    } else if (block->loop_header() != NULL) {
      return block->loop_header()->dominator();
    } else {
      return nullptr;
    }
  }
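
  // Computes the common dominator of the blocks of all uses of {node}; uses
  // that have not been assigned a block yet are ignored.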
  BasicBlock* GetCommonDominatorOfUses(Node* node) {
    BasicBlock* block = nullptr;
    for (Edge edge : node->use_edges()) {
      BasicBlock* use_block = GetBlockForUse(edge);
      block = block == NULL ? use_block : use_block == NULL
                                              ? block
                                              : BasicBlock::GetCommonDominator(
                                                    block, use_block);
    }
    return block;
  }
  BasicBlock* GetBlockForUse(Edge edge) {
    Node* use = edge.from();
    IrOpcode::Value opcode = use->opcode();
    if (IrOpcode::IsPhiOpcode(opcode)) {
      // If the use is from a coupled (i.e. floating) phi, compute the common
      // dominator of its uses. This will not recurse more than one level.
      if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
        Trace("  inspecting uses of coupled #%d:%s\n", use->id(),
              use->op()->mnemonic());
        DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
        return GetCommonDominatorOfUses(use);
      }
      // If the use is from a fixed (i.e. non-floating) phi, use the block
      // of the corresponding control input to the merge.
      if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
        Trace("  input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(),
              use->op()->mnemonic());
        Node* merge = NodeProperties::GetControlInput(use, 0);
        opcode = merge->opcode();
        DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
        use = NodeProperties::GetControlInput(merge, edge.index());
      }
    }
    BasicBlock* result = schedule_->block(use);
    if (result == NULL) return NULL;
    Trace("  must dominate use #%d:%s in B%d\n", use->id(),
          use->op()->mnemonic(), result->id().ToInt());
    return result;
  }
  void ScheduleFloatingControl(BasicBlock* block, Node* node) {
    scheduler_->FuseFloatingControl(block, node);
  }

  void ScheduleNode(BasicBlock* block, Node* node) {
    schedule_->PlanNode(block, node);
    scheduler_->scheduled_nodes_[block->id().ToSize()].push_back(node);
    scheduler_->UpdatePlacement(node, Scheduler::kScheduled);
  }
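
  // Creates a copy of {node} for placement into another block: registers a
  // use count on every input and duplicates the per-node scheduler data.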
  Node* CloneNode(Node* node) {
    int const input_count = node->InputCount();
    Node** const inputs = scheduler_->zone_->NewArray<Node*>(input_count);
    for (int index = 0; index < input_count; ++index) {
      Node* const input = node->InputAt(index);
      scheduler_->IncrementUnscheduledUseCount(input, index, node);
      inputs[index] = input;
    }
    Node* copy = scheduler_->graph_->NewNode(node->op(), input_count, inputs);
    scheduler_->node_data_.resize(copy->id() + 1,
                                  scheduler_->DefaultSchedulerData());
    scheduler_->node_data_[copy->id()] = scheduler_->node_data_[node->id()];
    return copy;
  }
  Scheduler* scheduler_;
  Schedule* schedule_;
  BoolVector marked_;
  ZoneDeque<BasicBlock*> marking_queue_;
};
void Scheduler::ScheduleLate() {
  Trace("--- SCHEDULE LATE ------------------------------------------\n");
  if (FLAG_trace_turbo_scheduler) {
    Trace("roots: ");
    for (Node* node : schedule_root_nodes_) {
      Trace("#%d:%s ", node->id(), node->op()->mnemonic());
    }
    Trace("\n");
  }

  // Schedule: Places nodes in dominator block of all their uses.
  ScheduleLateNodeVisitor schedule_late_visitor(zone_, this);
  schedule_late_visitor.Run(&schedule_root_nodes_);
}
// -----------------------------------------------------------------------------
// Phase 6: Seal the final schedule.
void Scheduler::SealFinalSchedule() {
  Trace("--- SEAL FINAL SCHEDULE ------------------------------------\n");

  // Serialize the assembly order and reverse-post-order numbering.
  special_rpo_->SerializeRPOIntoSchedule();
  special_rpo_->PrintAndVerifySpecialRPO();

  // Add collected nodes for basic blocks to their blocks in the right order.
  int block_num = 0;
  for (NodeVector& nodes : scheduled_nodes_) {
    BasicBlock::Id id = BasicBlock::Id::FromInt(block_num++);
    BasicBlock* block = schedule_->GetBlockById(id);
    for (auto i = nodes.rbegin(); i != nodes.rend(); ++i) {
      schedule_->AddNode(block, *i);
    }
  }
}
// -----------------------------------------------------------------------------
void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
  Trace("--- FUSE FLOATING CONTROL ----------------------------------\n");
  if (FLAG_trace_turbo_scheduler) {
    OFStream os(stdout);
    os << "Schedule before control flow fusion:\n" << *schedule_;
  }

  // Iterate on phase 1: Build control-flow graph.
  control_flow_builder_->Run(block, node);

  // Iterate on phase 2: Compute special RPO and dominator tree.
  special_rpo_->UpdateSpecialRPO(block, schedule_->block(node));
  // TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that.
  for (BasicBlock* b = block->rpo_next(); b != NULL; b = b->rpo_next()) {
    b->set_dominator_depth(-1);
    b->set_dominator(NULL);
  }
  PropagateImmediateDominators(block->rpo_next());
  // Iterate on phase 4: Schedule nodes early.
  // TODO(mstarzinger): The following loop gathering the propagation roots is a
  // temporary solution and should be merged into the rest of the scheduler as
  // soon as the approach settled for all floating loops.
  NodeVector propagation_roots(control_flow_builder_->control_);
  for (Node* node : control_flow_builder_->control_) {
    for (Node* use : node->uses()) {
      if (NodeProperties::IsPhi(use)) propagation_roots.push_back(use);
    }
  }
  if (FLAG_trace_turbo_scheduler) {
    Trace("propagation roots: ");
    for (Node* node : propagation_roots) {
      Trace("#%d:%s ", node->id(), node->op()->mnemonic());
    }
    Trace("\n");
  }
  ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
  schedule_early_visitor.Run(&propagation_roots);
  // Move previously planned nodes.
  // TODO(mstarzinger): Improve that by supporting bulk moves.
  scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
  MovePlannedNodes(block, schedule_->block(node));

  if (FLAG_trace_turbo_scheduler) {
    OFStream os(stdout);
    os << "Schedule after control flow fusion:\n" << *schedule_;
  }
}
void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) {
  Trace("Move planned nodes from B%d to B%d\n", from->id().ToInt(),
        to->id().ToInt());
  NodeVector* nodes = &(scheduled_nodes_[from->id().ToSize()]);
  for (Node* const node : *nodes) {
    schedule_->SetBlockForNode(to, node);
    scheduled_nodes_[to->id().ToSize()].push_back(node);
  }
  nodes->clear();
}
}  // namespace compiler
}  // namespace internal
}  // namespace v8