// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/linkage.h"
#include "src/compiler/register-allocator.h"
#include "src/string-stream.h"

namespace v8 {
namespace internal {
namespace compiler {

#define TRACE(...)                             \
  do {                                         \
    if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
  } while (false)

static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
  return a.Value() < b.Value() ? a : b;
}


static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) {
  return a.Value() > b.Value() ? a : b;
}


static void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) {
  auto it = std::find(v->begin(), v->end(), range);
  DCHECK(it != v->end());
  v->erase(it);
}

UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
                         InstructionOperand* hint)
    : operand_(operand), hint_(hint), pos_(pos), next_(nullptr), flags_(0) {
  bool register_beneficial = true;
  UsePositionType type = UsePositionType::kAny;
  if (operand_ != nullptr && operand_->IsUnallocated()) {
    const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_);
    if (unalloc->HasRegisterPolicy()) {
      type = UsePositionType::kRequiresRegister;
    } else if (unalloc->HasSlotPolicy()) {
      type = UsePositionType::kRequiresSlot;
      register_beneficial = false;
    } else {
      register_beneficial = !unalloc->HasAnyPolicy();
    }
  }
  flags_ = TypeField::encode(type) |
           RegisterBeneficialField::encode(register_beneficial);
  DCHECK(pos_.IsValid());
}


bool UsePosition::HasHint() const {
  return hint_ != nullptr && !hint_->IsUnallocated();
}


void UsePosition::set_type(UsePositionType type, bool register_beneficial) {
  DCHECK_IMPLIES(type == UsePositionType::kRequiresSlot, !register_beneficial);
  flags_ = TypeField::encode(type) |
           RegisterBeneficialField::encode(register_beneficial);
}


void UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
  DCHECK(Contains(pos) && pos.Value() != start().Value());
  auto after = new (zone) UseInterval(pos, end_);
  after->next_ = next_;
  next_ = after;
  end_ = pos;
}
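// E.g. splitting the half-open interval [2, 10[ at position 6 shrinks this
// interval to [2, 6[ and chains a freshly allocated [6, 10[ after it.
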
struct LiveRange::SpillAtDefinitionList : ZoneObject {
  SpillAtDefinitionList(int gap_index, InstructionOperand* operand,
                        SpillAtDefinitionList* next)
      : gap_index(gap_index), operand(operand), next(next) {}
  const int gap_index;
  InstructionOperand* const operand;
  SpillAtDefinitionList* const next;
};


void LiveRange::Verify() const {
  UsePosition* cur = first_pos_;
  while (cur != nullptr) {
    DCHECK(Start().Value() <= cur->pos().Value() &&
           cur->pos().Value() <= End().Value());
    cur = cur->next();
  }
}


bool LiveRange::HasOverlap(UseInterval* target) const {
  UseInterval* current_interval = first_interval_;
  while (current_interval != nullptr) {
    // Intervals overlap if the start of one is contained in the other.
    if (current_interval->Contains(target->start()) ||
        target->Contains(current_interval->start())) {
      return true;
    }
    current_interval = current_interval->next();
  }
  return false;
}

LiveRange::LiveRange(int id, Zone* zone)
    : id_(id),
      spilled_(false),
      has_slot_use_(false),
      is_phi_(false),
      is_non_loop_phi_(false),
      kind_(UNALLOCATED_REGISTERS),
      assigned_register_(kInvalidAssignment),
      last_interval_(nullptr),
      first_interval_(nullptr),
      first_pos_(nullptr),
      parent_(nullptr),
      next_(nullptr),
      current_interval_(nullptr),
      last_processed_use_(nullptr),
      current_hint_operand_(nullptr),
      spill_start_index_(kMaxInt),
      spill_type_(SpillType::kNoSpillType),
      spill_operand_(nullptr),
      spills_at_definition_(nullptr) {}

void LiveRange::set_assigned_register(int reg,
                                      InstructionOperandCache* operand_cache) {
  DCHECK(!HasRegisterAssigned() && !IsSpilled());
  assigned_register_ = reg;
  // TODO(dcarney): stop aliasing hint operands.
  ConvertUsesToOperand(GetAssignedOperand(operand_cache), nullptr);
}


void LiveRange::MakeSpilled() {
  DCHECK(!IsSpilled());
  DCHECK(!TopLevel()->HasNoSpillType());
  spilled_ = true;
  assigned_register_ = kInvalidAssignment;
}


void LiveRange::SpillAtDefinition(Zone* zone, int gap_index,
                                  InstructionOperand* operand) {
  DCHECK(HasNoSpillType());
  spills_at_definition_ = new (zone)
      SpillAtDefinitionList(gap_index, operand, spills_at_definition_);
}


void LiveRange::CommitSpillsAtDefinition(InstructionSequence* sequence,
                                         InstructionOperand* op,
                                         bool might_be_duplicated) {
  DCHECK(!IsChild());
  auto zone = sequence->zone();
  for (auto to_spill = spills_at_definition_; to_spill != nullptr;
       to_spill = to_spill->next) {
    auto gap = sequence->GapAt(to_spill->gap_index);
    auto move = gap->GetOrCreateParallelMove(GapInstruction::START, zone);
    // Skip insertion if it's possible that the move exists already as a
    // constraint move from a fixed output register to a slot.
    if (might_be_duplicated) {
      bool found = false;
      auto move_ops = move->move_operands();
      for (auto move_op = move_ops->begin(); move_op != move_ops->end();
           ++move_op) {
        if (move_op->IsEliminated()) continue;
        if (move_op->source()->Equals(to_spill->operand) &&
            move_op->destination()->Equals(op)) {
          found = true;
          break;
        }
      }
      if (found) continue;
    }
    move->AddMove(to_spill->operand, op, zone);
  }
}


void LiveRange::SetSpillOperand(InstructionOperand* operand) {
  DCHECK(HasNoSpillType());
  DCHECK(!operand->IsUnallocated());
  spill_type_ = SpillType::kSpillOperand;
  spill_operand_ = operand;
}


void LiveRange::SetSpillRange(SpillRange* spill_range) {
  DCHECK(HasNoSpillType() || HasSpillRange());
  spill_type_ = SpillType::kSpillRange;
  spill_range_ = spill_range;
}


void LiveRange::CommitSpillOperand(InstructionOperand* operand) {
  DCHECK(HasSpillRange());
  DCHECK(!operand->IsUnallocated());
  spill_type_ = SpillType::kSpillOperand;
  spill_operand_ = operand;
}

UsePosition* LiveRange::NextUsePosition(LifetimePosition start) {
  UsePosition* use_pos = last_processed_use_;
  if (use_pos == nullptr) use_pos = first_pos();
  while (use_pos != nullptr && use_pos->pos().Value() < start.Value()) {
    use_pos = use_pos->next();
  }
  last_processed_use_ = use_pos;
  return use_pos;
}


UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
    LifetimePosition start) {
  UsePosition* pos = NextUsePosition(start);
  while (pos != nullptr && !pos->RegisterIsBeneficial()) {
    pos = pos->next();
  }
  return pos;
}


UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
    LifetimePosition start) {
  auto pos = first_pos();
  UsePosition* prev = nullptr;
  while (pos != nullptr && pos->pos().Value() < start.Value()) {
    if (pos->RegisterIsBeneficial()) prev = pos;
    pos = pos->next();
  }
  return prev;
}


UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) {
  UsePosition* pos = NextUsePosition(start);
  while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister) {
    pos = pos->next();
  }
  return pos;
}


bool LiveRange::CanBeSpilled(LifetimePosition pos) {
  // We cannot spill a live range that has a use requiring a register
  // at the current or the immediate next position.
  auto use_pos = NextRegisterPosition(pos);
  if (use_pos == nullptr) return true;
  return use_pos->pos().Value() >
         pos.NextInstruction().InstructionEnd().Value();
}
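// Note that only kRequiresRegister uses block spilling here:
// NextRegisterPosition() skips uses that are merely register-beneficial,
// since those can still be satisfied from the spill slot.
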
InstructionOperand* LiveRange::GetAssignedOperand(
    InstructionOperandCache* cache) const {
  if (HasRegisterAssigned()) {
    DCHECK(!IsSpilled());
    switch (Kind()) {
      case GENERAL_REGISTERS:
        return cache->RegisterOperand(assigned_register());
      case DOUBLE_REGISTERS:
        return cache->DoubleRegisterOperand(assigned_register());
      default:
        UNREACHABLE();
    }
  }
  DCHECK(IsSpilled());
  DCHECK(!HasRegisterAssigned());
  auto op = TopLevel()->GetSpillOperand();
  DCHECK(!op->IsUnallocated());
  return op;
}


InstructionOperand LiveRange::GetAssignedOperand() const {
  if (HasRegisterAssigned()) {
    DCHECK(!IsSpilled());
    switch (Kind()) {
      case GENERAL_REGISTERS:
        return RegisterOperand(assigned_register());
      case DOUBLE_REGISTERS:
        return DoubleRegisterOperand(assigned_register());
      default:
        UNREACHABLE();
    }
  }
  DCHECK(IsSpilled());
  DCHECK(!HasRegisterAssigned());
  auto op = TopLevel()->GetSpillOperand();
  DCHECK(!op->IsUnallocated());
  return *op;
}


UseInterval* LiveRange::FirstSearchIntervalForPosition(
    LifetimePosition position) const {
  if (current_interval_ == nullptr) return first_interval_;
  if (current_interval_->start().Value() > position.Value()) {
    current_interval_ = nullptr;
    return first_interval_;
  }
  return current_interval_;
}


void LiveRange::AdvanceLastProcessedMarker(
    UseInterval* to_start_of, LifetimePosition but_not_past) const {
  if (to_start_of == nullptr) return;
  if (to_start_of->start().Value() > but_not_past.Value()) return;
  auto start = current_interval_ == nullptr ? LifetimePosition::Invalid()
                                            : current_interval_->start();
  if (to_start_of->start().Value() > start.Value()) {
    current_interval_ = to_start_of;
  }
}

void LiveRange::SplitAt(LifetimePosition position, LiveRange* result,
                        Zone* zone) {
  DCHECK(Start().Value() < position.Value());
  DCHECK(result->IsEmpty());
  // Find the last interval that ends before the position. If the
  // position is contained in one of the intervals in the chain, we
  // split that interval and use the first part.
  auto current = FirstSearchIntervalForPosition(position);

  // If the split position coincides with the beginning of a use interval
  // we need to split use positions in a special way.
  bool split_at_start = false;

  if (current->start().Value() == position.Value()) {
    // When splitting at start we need to locate the previous use interval.
    current = first_interval_;
  }

  while (current != nullptr) {
    if (current->Contains(position)) {
      current->SplitAt(position, zone);
      break;
    }
    auto next = current->next();
    if (next->start().Value() >= position.Value()) {
      split_at_start = (next->start().Value() == position.Value());
      break;
    }
    current = next;
  }

  // Partition original use intervals to the two live ranges.
  auto before = current;
  auto after = before->next();
  result->last_interval_ =
      (last_interval_ == before)
          ? after            // Only interval in the range after split.
          : last_interval_;  // Last interval of the original range.
  result->first_interval_ = after;
  last_interval_ = before;

  // Find the last use position before the split and the first use
  // position after it.
  auto use_after = first_pos_;
  UsePosition* use_before = nullptr;
  if (split_at_start) {
    // The split position coincides with the beginning of a use interval (the
    // end of a lifetime hole). Use at this position should be attributed to
    // the split child because split child owns use interval covering it.
    while (use_after != nullptr &&
           use_after->pos().Value() < position.Value()) {
      use_before = use_after;
      use_after = use_after->next();
    }
  } else {
    while (use_after != nullptr &&
           use_after->pos().Value() <= position.Value()) {
      use_before = use_after;
      use_after = use_after->next();
    }
  }

  // Partition original use positions to the two live ranges.
  if (use_before != nullptr) {
    use_before->next_ = nullptr;
  } else {
    first_pos_ = nullptr;
  }
  result->first_pos_ = use_after;

  // Discard cached iteration state. It might be pointing
  // to the use that no longer belongs to this live range.
  last_processed_use_ = nullptr;
  current_interval_ = nullptr;

  // Link the new live range in the chain before any of the other
  // ranges linked from the range before the split.
  result->parent_ = (parent_ == nullptr) ? this : parent_;
  result->kind_ = result->parent_->kind_;
  result->next_ = next_;
  next_ = result;

#ifdef DEBUG
  Verify();
  result->Verify();
#endif
}

// This implements an ordering on live ranges so that they are ordered by their
// start positions. This is needed for the correctness of the register
// allocation algorithm. If two live ranges start at the same offset then there
// is a tie breaker based on where the value is first used. This part of the
// ordering is merely a heuristic.
bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
  LifetimePosition start = Start();
  LifetimePosition other_start = other->Start();
  if (start.Value() == other_start.Value()) {
    UsePosition* pos = first_pos();
    if (pos == nullptr) return false;
    UsePosition* other_pos = other->first_pos();
    if (other_pos == nullptr) return true;
    return pos->pos().Value() < other_pos->pos().Value();
  }
  return start.Value() < other_start.Value();
}
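// This predicate is what keeps the queue of unhandled live ranges sorted;
// cf. the DCHECK(UnhandledIsSorted()) in TryReuseSpillForPhi() below.
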
void LiveRange::ShortenTo(LifetimePosition start) {
  TRACE("Shorten live range %d to [%d\n", id_, start.Value());
  DCHECK(first_interval_ != nullptr);
  DCHECK(first_interval_->start().Value() <= start.Value());
  DCHECK(start.Value() < first_interval_->end().Value());
  first_interval_->set_start(start);
}


void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end,
                               Zone* zone) {
  TRACE("Ensure live range %d in interval [%d %d[\n", id_, start.Value(),
        end.Value());
  auto new_end = end;
  while (first_interval_ != nullptr &&
         first_interval_->start().Value() <= end.Value()) {
    if (first_interval_->end().Value() > end.Value()) {
      new_end = first_interval_->end();
    }
    first_interval_ = first_interval_->next();
  }

  auto new_interval = new (zone) UseInterval(start, new_end);
  new_interval->next_ = first_interval_;
  first_interval_ = new_interval;
  if (new_interval->next() == nullptr) {
    last_interval_ = new_interval;
  }
}


void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end,
                               Zone* zone) {
  TRACE("Add to live range %d interval [%d %d[\n", id_, start.Value(),
        end.Value());
  if (first_interval_ == nullptr) {
    auto interval = new (zone) UseInterval(start, end);
    first_interval_ = interval;
    last_interval_ = interval;
  } else {
    if (end.Value() == first_interval_->start().Value()) {
      first_interval_->set_start(start);
    } else if (end.Value() < first_interval_->start().Value()) {
      auto interval = new (zone) UseInterval(start, end);
      interval->set_next(first_interval_);
      first_interval_ = interval;
    } else {
      // Order of instruction's processing (see ProcessInstructions) guarantees
      // that each new use interval either precedes or intersects with
      // last added interval.
      DCHECK(start.Value() < first_interval_->end().Value());
      first_interval_->start_ = Min(start, first_interval_->start_);
      first_interval_->end_ = Max(end, first_interval_->end_);
    }
  }
}


void LiveRange::AddUsePosition(LifetimePosition pos,
                               InstructionOperand* operand,
                               InstructionOperand* hint, Zone* zone) {
  TRACE("Add to live range %d use position %d\n", id_, pos.Value());
  auto use_pos = new (zone) UsePosition(pos, operand, hint);
  UsePosition* prev_hint = nullptr;
  UsePosition* prev = nullptr;
  auto current = first_pos_;
  while (current != nullptr && current->pos().Value() < pos.Value()) {
    prev_hint = current->HasHint() ? current : prev_hint;
    prev = current;
    current = current->next();
  }

  if (prev == nullptr) {
    use_pos->set_next(first_pos_);
    first_pos_ = use_pos;
  } else {
    use_pos->next_ = prev->next_;
    prev->next_ = use_pos;
  }

  if (prev_hint == nullptr && use_pos->HasHint()) {
    current_hint_operand_ = hint;
  }
}

void LiveRange::ConvertUsesToOperand(InstructionOperand* op,
                                     InstructionOperand* spill_op) {
  for (auto pos = first_pos(); pos != nullptr; pos = pos->next()) {
    DCHECK(Start().Value() <= pos->pos().Value() &&
           pos->pos().Value() <= End().Value());
    if (!pos->HasOperand()) {
      continue;
    }
    switch (pos->type()) {
      case UsePositionType::kRequiresSlot:
        if (spill_op != nullptr) {
          pos->operand()->ConvertTo(spill_op->kind(), spill_op->index());
        }
        break;
      case UsePositionType::kRequiresRegister:
        DCHECK(op->IsRegister() || op->IsDoubleRegister());
      // Fall through.
      case UsePositionType::kAny:
        pos->operand()->ConvertTo(op->kind(), op->index());
        break;
    }
  }
}


bool LiveRange::CanCover(LifetimePosition position) const {
  if (IsEmpty()) return false;
  return Start().Value() <= position.Value() &&
         position.Value() < End().Value();
}


bool LiveRange::Covers(LifetimePosition position) {
  if (!CanCover(position)) return false;
  auto start_search = FirstSearchIntervalForPosition(position);
  for (auto interval = start_search; interval != nullptr;
       interval = interval->next()) {
    DCHECK(interval->next() == nullptr ||
           interval->next()->start().Value() >= interval->start().Value());
    AdvanceLastProcessedMarker(interval, position);
    if (interval->Contains(position)) return true;
    if (interval->start().Value() > position.Value()) return false;
  }
  return false;
}

LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
  auto b = other->first_interval();
  if (b == nullptr) return LifetimePosition::Invalid();
  auto advance_last_processed_up_to = b->start();
  auto a = FirstSearchIntervalForPosition(b->start());
  while (a != nullptr && b != nullptr) {
    if (a->start().Value() > other->End().Value()) break;
    if (b->start().Value() > End().Value()) break;
    auto cur_intersection = a->Intersect(b);
    if (cur_intersection.IsValid()) {
      return cur_intersection;
    }
    if (a->start().Value() < b->start().Value()) {
      a = a->next();
      if (a == nullptr || a->start().Value() > other->End().Value()) break;
      AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
    } else {
      b = b->next();
    }
  }
  return LifetimePosition::Invalid();
}
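// The loop above is a merge-style sweep over two sorted lists of disjoint
// intervals: whichever side lags advances, and the early breaks bound the
// walk by each range's End().
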
InstructionOperandCache::InstructionOperandCache() {
  for (size_t i = 0; i < arraysize(general_register_operands_); ++i) {
    general_register_operands_[i] =
        i::compiler::RegisterOperand(static_cast<int>(i));
  }
  for (size_t i = 0; i < arraysize(double_register_operands_); ++i) {
    double_register_operands_[i] =
        i::compiler::DoubleRegisterOperand(static_cast<int>(i));
  }
}


RegisterAllocator::RegisterAllocator(const RegisterConfiguration* config,
                                     Zone* zone, Frame* frame,
                                     InstructionSequence* code,
                                     const char* debug_name)
    : local_zone_(zone),
      frame_(frame),
      code_(code),
      debug_name_(debug_name),
      config_(config),
      operand_cache_(new (code_zone()) InstructionOperandCache()),
      phi_map_(local_zone()),
      live_in_sets_(code->InstructionBlockCount(), nullptr, local_zone()),
      live_ranges_(code->VirtualRegisterCount() * 2, nullptr, local_zone()),
      fixed_live_ranges_(this->config()->num_general_registers(), nullptr,
                         local_zone()),
      fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr,
                                local_zone()),
      unhandled_live_ranges_(local_zone()),
      active_live_ranges_(local_zone()),
      inactive_live_ranges_(local_zone()),
      spill_ranges_(local_zone()),
      mode_(UNALLOCATED_REGISTERS),
      num_registers_(-1) {
  DCHECK(this->config()->num_general_registers() <=
         RegisterConfiguration::kMaxGeneralRegisters);
  DCHECK(this->config()->num_double_registers() <=
         RegisterConfiguration::kMaxDoubleRegisters);
  // TryAllocateFreeReg and AllocateBlockedReg assume this
  // when allocating local arrays.
  DCHECK(RegisterConfiguration::kMaxDoubleRegisters >=
         this->config()->num_general_registers());
  unhandled_live_ranges().reserve(
      static_cast<size_t>(code->VirtualRegisterCount() * 2));
  active_live_ranges().reserve(8);
  inactive_live_ranges().reserve(8);
  spill_ranges().reserve(8);
  assigned_registers_ =
      new (code_zone()) BitVector(config->num_general_registers(), code_zone());
  assigned_double_registers_ = new (code_zone())
      BitVector(config->num_aliased_double_registers(), code_zone());
  frame->SetAllocatedRegisters(assigned_registers_);
  frame->SetAllocatedDoubleRegisters(assigned_double_registers_);
}

BitVector* RegisterAllocator::ComputeLiveOut(const InstructionBlock* block) {
  // Compute live out for the given block, except not including backward
  // successor edges.
  auto live_out = new (local_zone())
      BitVector(code()->VirtualRegisterCount(), local_zone());

  // Process all successor blocks.
  for (auto succ : block->successors()) {
    // Add values live on entry to the successor. Note the successor's
    // live_in will not be computed yet for backwards edges.
    auto live_in = live_in_sets_[succ.ToSize()];
    if (live_in != nullptr) live_out->Union(*live_in);

    // All phi input operands corresponding to this successor edge are live
    // out from this block.
    auto successor = code()->InstructionBlockAt(succ);
    size_t index = successor->PredecessorIndexOf(block->rpo_number());
    DCHECK(index < successor->PredecessorCount());
    for (auto phi : successor->phis()) {
      live_out->Add(phi->operands()[index]);
    }
  }
  return live_out;
}
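// For a backward edge the successor's live_in is still null at this point and
// contributes nothing; values live around a loop are instead handled in
// BuildLiveRanges(), which extends them across the whole loop.
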
void RegisterAllocator::AddInitialIntervals(const InstructionBlock* block,
                                            BitVector* live_out) {
  // Add an interval that includes the entire block to the live range for
  // each live_out value.
  auto start =
      LifetimePosition::FromInstructionIndex(block->first_instruction_index());
  auto end = LifetimePosition::FromInstructionIndex(
                 block->last_instruction_index()).NextInstruction();
  BitVector::Iterator iterator(live_out);
  while (!iterator.Done()) {
    int operand_index = iterator.Current();
    auto range = LiveRangeFor(operand_index);
    range->AddUseInterval(start, end, local_zone());
    iterator.Advance();
  }
}


int RegisterAllocator::FixedDoubleLiveRangeID(int index) {
  return -index - 1 - config()->num_general_registers();
}
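// Fixed live ranges are keyed by negative IDs so they can never collide with
// virtual register numbers; double registers are offset past the IDs used by
// the general registers (cf. FixedLiveRangeID()).
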
InstructionOperand* RegisterAllocator::AllocateFixed(
    UnallocatedOperand* operand, int pos, bool is_tagged) {
  TRACE("Allocating fixed reg for op %d\n", operand->virtual_register());
  DCHECK(operand->HasFixedPolicy());
  if (operand->HasFixedSlotPolicy()) {
    operand->ConvertTo(InstructionOperand::STACK_SLOT,
                       operand->fixed_slot_index());
  } else if (operand->HasFixedRegisterPolicy()) {
    int reg_index = operand->fixed_register_index();
    operand->ConvertTo(InstructionOperand::REGISTER, reg_index);
  } else if (operand->HasFixedDoubleRegisterPolicy()) {
    int reg_index = operand->fixed_register_index();
    operand->ConvertTo(InstructionOperand::DOUBLE_REGISTER, reg_index);
  } else {
    UNREACHABLE();
  }
  if (is_tagged) {
    TRACE("Fixed reg is tagged at %d\n", pos);
    auto instr = InstructionAt(pos);
    if (instr->HasPointerMap()) {
      instr->pointer_map()->RecordPointer(operand, code_zone());
    }
  }
  return operand;
}


LiveRange* RegisterAllocator::NewLiveRange(int index) {
  // The LiveRange object itself can go in the local zone, but the
  // InstructionOperand needs to go in the code zone, since it may survive
  // register allocation.
  return new (local_zone()) LiveRange(index, code_zone());
}


LiveRange* RegisterAllocator::FixedLiveRangeFor(int index) {
  DCHECK(index < config()->num_general_registers());
  auto result = fixed_live_ranges()[index];
  if (result == nullptr) {
    result = NewLiveRange(FixedLiveRangeID(index));
    DCHECK(result->IsFixed());
    result->kind_ = GENERAL_REGISTERS;
    SetLiveRangeAssignedRegister(result, index);
    fixed_live_ranges()[index] = result;
  }
  return result;
}


LiveRange* RegisterAllocator::FixedDoubleLiveRangeFor(int index) {
  DCHECK(index < config()->num_aliased_double_registers());
  auto result = fixed_double_live_ranges()[index];
  if (result == nullptr) {
    result = NewLiveRange(FixedDoubleLiveRangeID(index));
    DCHECK(result->IsFixed());
    result->kind_ = DOUBLE_REGISTERS;
    SetLiveRangeAssignedRegister(result, index);
    fixed_double_live_ranges()[index] = result;
  }
  return result;
}


LiveRange* RegisterAllocator::LiveRangeFor(int index) {
  if (index >= static_cast<int>(live_ranges().size())) {
    live_ranges().resize(index + 1, nullptr);
  }
  auto result = live_ranges()[index];
  if (result == nullptr) {
    result = NewLiveRange(index);
    live_ranges()[index] = result;
  }
  return result;
}


GapInstruction* RegisterAllocator::GetLastGap(const InstructionBlock* block) {
  int last_instruction = block->last_instruction_index();
  return code()->GapAt(last_instruction - 1);
}


LiveRange* RegisterAllocator::LiveRangeFor(InstructionOperand* operand) {
  if (operand->IsUnallocated()) {
    return LiveRangeFor(UnallocatedOperand::cast(operand)->virtual_register());
  } else if (operand->IsRegister()) {
    return FixedLiveRangeFor(operand->index());
  } else if (operand->IsDoubleRegister()) {
    return FixedDoubleLiveRangeFor(operand->index());
  } else {
    return nullptr;
  }
}

void RegisterAllocator::Define(LifetimePosition position,
                               InstructionOperand* operand,
                               InstructionOperand* hint) {
  auto range = LiveRangeFor(operand);
  if (range == nullptr) return;

  if (range->IsEmpty() || range->Start().Value() > position.Value()) {
    // Can happen if there is a definition without use.
    range->AddUseInterval(position, position.NextInstruction(), local_zone());
    range->AddUsePosition(position.NextInstruction(), nullptr, nullptr,
                          local_zone());
  } else {
    range->ShortenTo(position);
  }

  if (operand->IsUnallocated()) {
    auto unalloc_operand = UnallocatedOperand::cast(operand);
    range->AddUsePosition(position, unalloc_operand, hint, local_zone());
  }
}


void RegisterAllocator::Use(LifetimePosition block_start,
                            LifetimePosition position,
                            InstructionOperand* operand,
                            InstructionOperand* hint) {
  auto range = LiveRangeFor(operand);
  if (range == nullptr) return;
  if (operand->IsUnallocated()) {
    UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
    range->AddUsePosition(position, unalloc_operand, hint, local_zone());
  }
  range->AddUseInterval(block_start, position, local_zone());
}
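// Define() anchors a range at its definition, either shortening the
// conservative whole-block interval or creating a minimal one for a dead
// definition, while Use() stretches the range from the use back to the block
// start; the definition seen later in the backward walk trims it again.
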
void RegisterAllocator::AddGapMove(int index,
                                   GapInstruction::InnerPosition position,
                                   InstructionOperand* from,
                                   InstructionOperand* to) {
  auto gap = code()->GapAt(index);
  auto move = gap->GetOrCreateParallelMove(position, code_zone());
  move->AddMove(from, to, code_zone());
}


static bool AreUseIntervalsIntersecting(UseInterval* interval1,
                                        UseInterval* interval2) {
  while (interval1 != nullptr && interval2 != nullptr) {
    if (interval1->start().Value() < interval2->start().Value()) {
      if (interval1->end().Value() > interval2->start().Value()) {
        return true;
      }
      interval1 = interval1->next();
    } else {
      if (interval2->end().Value() > interval1->start().Value()) {
        return true;
      }
      interval2 = interval2->next();
    }
  }
  return false;
}
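// Intervals are half-open, so e.g. [0, 4[ and [4, 8[ do not intersect while
// [0, 5[ and [4, 8[ do; hence the strict '>' comparisons above.
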
SpillRange::SpillRange(LiveRange* range, Zone* zone) : live_ranges_(zone) {
  auto src = range->first_interval();
  UseInterval* result = nullptr;
  UseInterval* node = nullptr;
  // Copy the nodes.
  while (src != nullptr) {
    auto new_node = new (zone) UseInterval(src->start(), src->end());
    if (result == nullptr) {
      result = new_node;
    } else {
      node->set_next(new_node);
    }
    node = new_node;
    src = src->next();
  }
  use_interval_ = result;
  live_ranges().push_back(range);
  end_position_ = node->end();
  DCHECK(!range->HasSpillRange());
  range->SetSpillRange(this);
}


bool SpillRange::IsIntersectingWith(SpillRange* other) const {
  if (this->use_interval_ == nullptr || other->use_interval_ == nullptr ||
      this->End().Value() <= other->use_interval_->start().Value() ||
      other->End().Value() <= this->use_interval_->start().Value()) {
    return false;
  }
  return AreUseIntervalsIntersecting(use_interval_, other->use_interval_);
}


bool SpillRange::TryMerge(SpillRange* other) {
  if (Kind() != other->Kind() || IsIntersectingWith(other)) return false;

  auto max = LifetimePosition::MaxPosition();
  if (End().Value() < other->End().Value() &&
      other->End().Value() != max.Value()) {
    end_position_ = other->End();
  }
  other->end_position_ = max;

  MergeDisjointIntervals(other->use_interval_);
  other->use_interval_ = nullptr;

  for (auto range : other->live_ranges()) {
    DCHECK(range->GetSpillRange() == other);
    range->SetSpillRange(this);
  }

  live_ranges().insert(live_ranges().end(), other->live_ranges().begin(),
                       other->live_ranges().end());
  other->live_ranges().clear();

  return true;
}


void SpillRange::SetOperand(InstructionOperand* op) {
  for (auto range : live_ranges()) {
    DCHECK(range->GetSpillRange() == this);
    range->CommitSpillOperand(op);
  }
}

void SpillRange::MergeDisjointIntervals(UseInterval* other) {
  UseInterval* tail = nullptr;
  auto current = use_interval_;
  while (other != nullptr) {
    // Make sure the 'current' list starts first.
    if (current == nullptr ||
        current->start().Value() > other->start().Value()) {
      std::swap(current, other);
    }
    // Check disjointness.
    DCHECK(other == nullptr ||
           current->end().Value() <= other->start().Value());
    // Append the 'current' node to the result accumulator and move forward.
    if (tail == nullptr) {
      use_interval_ = current;
    } else {
      tail->set_next(current);
    }
    tail = current;
    current = current->next();
  }
  // Other list is empty => we are done.
}

void RegisterAllocator::AssignSpillSlots() {
  // Merge disjoint spill ranges.
  for (size_t i = 0; i < spill_ranges().size(); i++) {
    auto range = spill_ranges()[i];
    if (range->IsEmpty()) continue;
    for (size_t j = i + 1; j < spill_ranges().size(); j++) {
      auto other = spill_ranges()[j];
      if (!other->IsEmpty()) {
        range->TryMerge(other);
      }
    }
  }

  // Allocate slots for the merged spill ranges.
  for (auto range : spill_ranges()) {
    if (range->IsEmpty()) continue;
    // Allocate a new operand referring to the spill slot.
    auto kind = range->Kind();
    int index = frame()->AllocateSpillSlot(kind == DOUBLE_REGISTERS);
    auto op_kind = kind == DOUBLE_REGISTERS
                       ? InstructionOperand::DOUBLE_STACK_SLOT
                       : InstructionOperand::STACK_SLOT;
    auto op = InstructionOperand::New(code_zone(), op_kind, index);
    range->SetOperand(op);
  }
}


void RegisterAllocator::CommitAssignment() {
  for (auto range : live_ranges()) {
    if (range == nullptr || range->IsEmpty()) continue;
    auto assigned = range->GetAssignedOperand(operand_cache());
    InstructionOperand* spill_operand = nullptr;
    if (!range->TopLevel()->HasNoSpillType()) {
      spill_operand = range->TopLevel()->GetSpillOperand();
    }
    range->ConvertUsesToOperand(assigned, spill_operand);
    if (!range->IsChild() && spill_operand != nullptr) {
      range->CommitSpillsAtDefinition(code(), spill_operand,
                                      range->has_slot_use());
    }
  }
}


SpillRange* RegisterAllocator::AssignSpillRangeToLiveRange(LiveRange* range) {
  auto spill_range = new (local_zone()) SpillRange(range, local_zone());
  spill_ranges().push_back(spill_range);
  return spill_range;
}

bool RegisterAllocator::TryReuseSpillForPhi(LiveRange* range) {
  if (range->IsChild() || !range->is_phi()) return false;
  DCHECK(!range->HasSpillOperand());

  auto lookup = phi_map_.find(range->id());
  DCHECK(lookup != phi_map_.end());
  auto phi = lookup->second.phi;
  auto block = lookup->second.block;
  // Count the number of spilled operands.
  size_t spilled_count = 0;
  LiveRange* first_op = nullptr;
  for (size_t i = 0; i < phi->operands().size(); i++) {
    int op = phi->operands()[i];
    LiveRange* op_range = LiveRangeFor(op);
    if (op_range->GetSpillRange() == nullptr) continue;
    auto pred = code()->InstructionBlockAt(block->predecessors()[i]);
    auto pred_end =
        LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
    while (op_range != nullptr && !op_range->CanCover(pred_end)) {
      op_range = op_range->next();
    }
    if (op_range != nullptr && op_range->IsSpilled()) {
      spilled_count++;
      if (first_op == nullptr) {
        first_op = op_range->TopLevel();
      }
    }
  }

  // Only continue if more than half of the operands are spilled.
  if (spilled_count * 2 <= phi->operands().size()) {
    return false;
  }

  // Try to merge the spilled operands and count the number of merged spilled
  // operands.
  DCHECK(first_op != nullptr);
  auto first_op_spill = first_op->GetSpillRange();
  size_t num_merged = 1;
  for (size_t i = 1; i < phi->operands().size(); i++) {
    int op = phi->operands()[i];
    auto op_range = LiveRangeFor(op);
    auto op_spill = op_range->GetSpillRange();
    if (op_spill != nullptr &&
        (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill))) {
      num_merged++;
    }
  }

  // Only continue if enough operands could be merged to the
  // same spill slot.
  if (num_merged * 2 <= phi->operands().size() ||
      AreUseIntervalsIntersecting(first_op_spill->interval(),
                                  range->first_interval())) {
    return false;
  }

  // If the range does not need register soon, spill it to the merged
  // spill range.
  auto next_pos = range->Start();
  if (code()->IsGapAt(next_pos.InstructionIndex())) {
    next_pos = next_pos.NextInstruction();
  }
  auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
  if (pos == nullptr) {
    auto spill_range = range->TopLevel()->HasSpillRange()
                           ? range->TopLevel()->GetSpillRange()
                           : AssignSpillRangeToLiveRange(range->TopLevel());
    CHECK(first_op_spill->TryMerge(spill_range));
    Spill(range);
    return true;
  } else if (pos->pos().Value() > range->Start().NextInstruction().Value()) {
    auto spill_range = range->TopLevel()->HasSpillRange()
                           ? range->TopLevel()->GetSpillRange()
                           : AssignSpillRangeToLiveRange(range->TopLevel());
    CHECK(first_op_spill->TryMerge(spill_range));
    SpillBetween(range, range->Start(), pos->pos());
    DCHECK(UnhandledIsSorted());
    return true;
  }
  return false;
}

void RegisterAllocator::MeetRegisterConstraints(const InstructionBlock* block) {
  int start = block->first_instruction_index();
  int end = block->last_instruction_index();
  DCHECK_NE(-1, start);
  for (int i = start; i <= end; ++i) {
    if (code()->IsGapAt(i)) {
      Instruction* instr = nullptr;
      Instruction* prev_instr = nullptr;
      if (i < end) instr = InstructionAt(i + 1);
      if (i > start) prev_instr = InstructionAt(i - 1);
      MeetConstraintsBetween(prev_instr, instr, i);
    }
  }

  // Meet register constraints for the instruction at the end.
  if (!code()->IsGapAt(end)) {
    MeetRegisterConstraintsForLastInstructionInBlock(block);
  }
}


void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
    const InstructionBlock* block) {
  int end = block->last_instruction_index();
  auto last_instruction = InstructionAt(end);
  for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
    auto output_operand = last_instruction->OutputAt(i);
    DCHECK(!output_operand->IsConstant());
    auto output = UnallocatedOperand::cast(output_operand);
    int output_vreg = output->virtual_register();
    auto range = LiveRangeFor(output_vreg);
    bool assigned = false;
    if (output->HasFixedPolicy()) {
      AllocateFixed(output, -1, false);
      // This value is produced on the stack, we never need to spill it.
      if (output->IsStackSlot()) {
        DCHECK(output->index() < frame_->GetSpillSlotCount());
        range->SetSpillOperand(output);
        range->SetSpillStartIndex(end);
        assigned = true;
      }

      for (auto succ : block->successors()) {
        const InstructionBlock* successor = code()->InstructionBlockAt(succ);
        DCHECK(successor->PredecessorCount() == 1);
        int gap_index = successor->first_instruction_index();
        DCHECK(code()->IsGapAt(gap_index));

        // Create an unconstrained operand for the same virtual register
        // and insert a gap move from the fixed output to the operand.
        UnallocatedOperand* output_copy =
            UnallocatedOperand(UnallocatedOperand::ANY, output_vreg)
                .Copy(code_zone());
        AddGapMove(gap_index, GapInstruction::START, output, output_copy);
      }
    }

    if (!assigned) {
      for (auto succ : block->successors()) {
        const InstructionBlock* successor = code()->InstructionBlockAt(succ);
        DCHECK(successor->PredecessorCount() == 1);
        int gap_index = successor->first_instruction_index();
        range->SpillAtDefinition(local_zone(), gap_index, output);
        range->SetSpillStartIndex(gap_index);
      }
    }
  }
}

void RegisterAllocator::MeetConstraintsBetween(Instruction* first,
                                               Instruction* second,
                                               int gap_index) {
  if (first != nullptr) {
    // Handle fixed temporaries.
    for (size_t i = 0; i < first->TempCount(); i++) {
      auto temp = UnallocatedOperand::cast(first->TempAt(i));
      if (temp->HasFixedPolicy()) {
        AllocateFixed(temp, gap_index - 1, false);
      }
    }

    // Handle constant/fixed output operands.
    for (size_t i = 0; i < first->OutputCount(); i++) {
      InstructionOperand* output = first->OutputAt(i);
      if (output->IsConstant()) {
        int output_vreg = output->index();
        auto range = LiveRangeFor(output_vreg);
        range->SetSpillStartIndex(gap_index - 1);
        range->SetSpillOperand(output);
      } else {
        auto first_output = UnallocatedOperand::cast(output);
        auto range = LiveRangeFor(first_output->virtual_register());
        bool assigned = false;
        if (first_output->HasFixedPolicy()) {
          auto output_copy = first_output->CopyUnconstrained(code_zone());
          bool is_tagged = HasTaggedValue(first_output->virtual_register());
          AllocateFixed(first_output, gap_index, is_tagged);

          // This value is produced on the stack, we never need to spill it.
          if (first_output->IsStackSlot()) {
            DCHECK(first_output->index() < frame_->GetSpillSlotCount());
            range->SetSpillOperand(first_output);
            range->SetSpillStartIndex(gap_index - 1);
            assigned = true;
          }
          AddGapMove(gap_index, GapInstruction::START, first_output,
                     output_copy);
        }

        // Make sure we add a gap move for spilling (if we have not done
        // so already).
        if (!assigned) {
          range->SpillAtDefinition(local_zone(), gap_index, first_output);
          range->SetSpillStartIndex(gap_index);
        }
      }
    }
  }

  if (second != nullptr) {
    // Handle fixed input operands of second instruction.
    for (size_t i = 0; i < second->InputCount(); i++) {
      auto input = second->InputAt(i);
      if (input->IsImmediate()) continue;  // Ignore immediates.
      auto cur_input = UnallocatedOperand::cast(input);
      if (cur_input->HasFixedPolicy()) {
        auto input_copy = cur_input->CopyUnconstrained(code_zone());
        bool is_tagged = HasTaggedValue(cur_input->virtual_register());
        AllocateFixed(cur_input, gap_index + 1, is_tagged);
        AddGapMove(gap_index, GapInstruction::END, input_copy, cur_input);
      }
    }

    // Handle "output same as input" for second instruction.
    for (size_t i = 0; i < second->OutputCount(); i++) {
      auto output = second->OutputAt(i);
      if (!output->IsUnallocated()) continue;
      auto second_output = UnallocatedOperand::cast(output);
      if (second_output->HasSameAsInputPolicy()) {
        DCHECK(i == 0);  // Only valid for first output.
        UnallocatedOperand* cur_input =
            UnallocatedOperand::cast(second->InputAt(0));
        int output_vreg = second_output->virtual_register();
        int input_vreg = cur_input->virtual_register();

        auto input_copy = cur_input->CopyUnconstrained(code_zone());
        cur_input->set_virtual_register(second_output->virtual_register());
        AddGapMove(gap_index, GapInstruction::END, input_copy, cur_input);

        if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
          int index = gap_index + 1;
          Instruction* instr = InstructionAt(index);
          if (instr->HasPointerMap()) {
            instr->pointer_map()->RecordPointer(input_copy, code_zone());
          }
        } else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
          // The input is assumed to immediately have a tagged representation,
          // before the pointer map can be used. I.e. the pointer map at the
          // instruction will include the output operand (whose value at the
          // beginning of the instruction is equal to the input operand). If
          // this is not desired, then the pointer map at this instruction needs
          // to be adjusted manually.
        }
      }
    }
  }
}

bool RegisterAllocator::IsOutputRegisterOf(Instruction* instr, int index) {
  for (size_t i = 0; i < instr->OutputCount(); i++) {
    auto output = instr->OutputAt(i);
    if (output->IsRegister() && output->index() == index) return true;
  }
  return false;
}


bool RegisterAllocator::IsOutputDoubleRegisterOf(Instruction* instr,
                                                 int index) {
  for (size_t i = 0; i < instr->OutputCount(); i++) {
    auto output = instr->OutputAt(i);
    if (output->IsDoubleRegister() && output->index() == index) return true;
  }
  return false;
}

void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
                                            BitVector* live) {
  int block_start = block->first_instruction_index();
  auto block_start_position =
      LifetimePosition::FromInstructionIndex(block_start);

  for (int index = block->last_instruction_index(); index >= block_start;
       index--) {
    auto curr_position = LifetimePosition::FromInstructionIndex(index);
    auto instr = InstructionAt(index);
    DCHECK(instr != nullptr);
    if (instr->IsGapMoves()) {
      // Process the moves of the gap instruction, making their sources live.
      auto gap = code()->GapAt(index);
      const GapInstruction::InnerPosition kPositions[] = {
          GapInstruction::END, GapInstruction::START};
      for (auto position : kPositions) {
        auto move = gap->GetParallelMove(position);
        if (move == nullptr) continue;
        if (position == GapInstruction::END) {
          curr_position = curr_position.InstructionEnd();
        } else {
          curr_position = curr_position.InstructionStart();
        }
        auto move_ops = move->move_operands();
        for (auto cur = move_ops->begin(); cur != move_ops->end(); ++cur) {
          auto from = cur->source();
          auto to = cur->destination();
          auto hint = to;
          if (to->IsUnallocated()) {
            int to_vreg = UnallocatedOperand::cast(to)->virtual_register();
            auto to_range = LiveRangeFor(to_vreg);
            if (to_range->is_phi()) {
              DCHECK(!FLAG_turbo_delay_ssa_decon);
              if (to_range->is_non_loop_phi()) {
                hint = to_range->current_hint_operand();
              }
            } else {
              if (live->Contains(to_vreg)) {
                Define(curr_position, to, from);
                live->Remove(to_vreg);
              } else {
                cur->Eliminate();
                continue;
              }
            }
          } else {
            Define(curr_position, to, from);
          }
          Use(block_start_position, curr_position, from, hint);
          if (from->IsUnallocated()) {
            live->Add(UnallocatedOperand::cast(from)->virtual_register());
          }
        }
      }
    } else {
      // Process output, inputs, and temps of this non-gap instruction.
      for (size_t i = 0; i < instr->OutputCount(); i++) {
        auto output = instr->OutputAt(i);
        if (output->IsUnallocated()) {
          // Unsupported.
          DCHECK(!UnallocatedOperand::cast(output)->HasSlotPolicy());
          int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
          live->Remove(out_vreg);
        } else if (output->IsConstant()) {
          int out_vreg = output->index();
          live->Remove(out_vreg);
        }
        Define(curr_position, output, nullptr);
      }

      if (instr->ClobbersRegisters()) {
        for (int i = 0; i < config()->num_general_registers(); ++i) {
          if (!IsOutputRegisterOf(instr, i)) {
            auto range = FixedLiveRangeFor(i);
            range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
                                  local_zone());
          }
        }
      }

      if (instr->ClobbersDoubleRegisters()) {
        for (int i = 0; i < config()->num_aliased_double_registers(); ++i) {
          if (!IsOutputDoubleRegisterOf(instr, i)) {
            auto range = FixedDoubleLiveRangeFor(i);
            range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
                                  local_zone());
          }
        }
      }

      for (size_t i = 0; i < instr->InputCount(); i++) {
        auto input = instr->InputAt(i);
        if (input->IsImmediate()) continue;  // Ignore immediates.
        LifetimePosition use_pos;
        if (input->IsUnallocated() &&
            UnallocatedOperand::cast(input)->IsUsedAtStart()) {
          use_pos = curr_position;
        } else {
          use_pos = curr_position.InstructionEnd();
        }

        if (input->IsUnallocated()) {
          UnallocatedOperand* unalloc = UnallocatedOperand::cast(input);
          int vreg = unalloc->virtual_register();
          live->Add(vreg);
          if (unalloc->HasSlotPolicy()) {
            LiveRangeFor(vreg)->set_has_slot_use(true);
          }
        }
        Use(block_start_position, use_pos, input, nullptr);
      }

      for (size_t i = 0; i < instr->TempCount(); i++) {
        auto temp = instr->TempAt(i);
        // Unsupported.
        DCHECK_IMPLIES(temp->IsUnallocated(),
                       !UnallocatedOperand::cast(temp)->HasSlotPolicy());
        if (instr->ClobbersTemps()) {
          if (temp->IsRegister()) continue;
          if (temp->IsUnallocated()) {
            UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp);
            if (temp_unalloc->HasFixedPolicy()) {
              continue;
            }
          }
        }
        Use(block_start_position, curr_position.InstructionEnd(), temp,
            nullptr);
        Define(curr_position, temp, nullptr);
      }
    }
  }
}
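// Because each block is processed backwards, a single pass suffices: a use
// extends its live range toward the block start, and the definition,
// encountered later in the walk, shortens it again via Define().
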
void RegisterAllocator::ResolvePhis(const InstructionBlock* block) {
  for (auto phi : block->phis()) {
    int phi_vreg = phi->virtual_register();
    phi_map_.insert(std::make_pair(phi_vreg, PhiMapValue(phi, block)));
    auto& output = phi->output();
    if (!FLAG_turbo_delay_ssa_decon) {
      for (size_t i = 0; i < phi->operands().size(); ++i) {
        InstructionBlock* cur_block =
            code()->InstructionBlockAt(block->predecessors()[i]);
        AddGapMove(cur_block->last_instruction_index() - 1, GapInstruction::END,
                   &phi->inputs()[i], &output);
        DCHECK(!InstructionAt(cur_block->last_instruction_index())
                    ->HasPointerMap());
      }
    }
    auto live_range = LiveRangeFor(phi_vreg);
    int gap_index = block->first_instruction_index();
    live_range->SpillAtDefinition(local_zone(), gap_index, &output);
    live_range->SetSpillStartIndex(gap_index);
    // We use the phi-ness of some nodes in some later heuristics.
    live_range->set_is_phi(true);
    live_range->set_is_non_loop_phi(!block->IsLoopHeader());
  }
}


void RegisterAllocator::MeetRegisterConstraints() {
  for (auto block : code()->instruction_blocks()) {
    MeetRegisterConstraints(block);
  }
}


void RegisterAllocator::ResolvePhis() {
  // Process the blocks in reverse order.
  for (auto i = code()->instruction_blocks().rbegin();
       i != code()->instruction_blocks().rend(); ++i) {
    ResolvePhis(*i);
  }
}


const InstructionBlock* RegisterAllocator::GetInstructionBlock(
    LifetimePosition pos) {
  return code()->GetInstructionBlock(pos.InstructionIndex());
}

void RegisterAllocator::ConnectRanges() {
  ZoneMap<std::pair<ParallelMove*, InstructionOperand*>, InstructionOperand*>
      delayed_insertion_map(local_zone());
  for (auto first_range : live_ranges()) {
    if (first_range == nullptr || first_range->IsChild()) continue;
    for (auto second_range = first_range->next(); second_range != nullptr;
         first_range = second_range, second_range = second_range->next()) {
      auto pos = second_range->Start();
      // Add gap move if the two live ranges touch and there is no block
      // boundary.
      if (second_range->IsSpilled()) continue;
      if (first_range->End().Value() != pos.Value()) continue;
      if (IsBlockBoundary(pos) &&
          !CanEagerlyResolveControlFlow(GetInstructionBlock(pos))) {
        continue;
      }
      auto prev_operand = first_range->GetAssignedOperand(operand_cache());
      auto cur_operand = second_range->GetAssignedOperand(operand_cache());
      if (prev_operand->Equals(cur_operand)) continue;
      int index = pos.InstructionIndex();
      bool delay_insertion = false;
      GapInstruction::InnerPosition gap_pos;
      int gap_index = index;
      if (code()->IsGapAt(index)) {
        gap_pos = pos.IsInstructionStart() ? GapInstruction::START
                                           : GapInstruction::END;
      } else {
        gap_index = pos.IsInstructionStart() ? (index - 1) : (index + 1);
        delay_insertion = gap_index < index;
        gap_pos = delay_insertion ? GapInstruction::END : GapInstruction::START;
      }
      auto move = code()->GapAt(gap_index)->GetOrCreateParallelMove(
          gap_pos, code_zone());
      if (!delay_insertion) {
        move->AddMove(prev_operand, cur_operand, code_zone());
      } else {
        delayed_insertion_map.insert(
            std::make_pair(std::make_pair(move, prev_operand), cur_operand));
      }
    }
  }
  if (delayed_insertion_map.empty()) return;
  // Insert all the moves which should occur after the stored move.
  ZoneVector<MoveOperands> to_insert(local_zone());
  ZoneVector<MoveOperands*> to_eliminate(local_zone());
  to_insert.reserve(4);
  to_eliminate.reserve(4);
  auto move = delayed_insertion_map.begin()->first.first;
  for (auto it = delayed_insertion_map.begin();; ++it) {
    bool done = it == delayed_insertion_map.end();
    if (done || it->first.first != move) {
      // Commit the MoveOperands for current ParallelMove.
      for (auto move_ops : to_eliminate) {
        move_ops->Eliminate();
      }
      for (auto move_ops : to_insert) {
        move->AddMove(move_ops.source(), move_ops.destination(), code_zone());
      }
      if (done) break;
      // Reset state.
      to_eliminate.clear();
      to_insert.clear();
      move = it->first.first;
    }
    // Gather all MoveOperands for a single ParallelMove.
    MoveOperands move_ops(it->first.second, it->second);
    auto eliminate = move->PrepareInsertAfter(&move_ops);
    to_insert.push_back(move_ops);
    if (eliminate != nullptr) to_eliminate.push_back(eliminate);
  }
}

bool RegisterAllocator::CanEagerlyResolveControlFlow(
    const InstructionBlock* block) const {
  if (block->PredecessorCount() != 1) return false;
  return block->predecessors()[0].IsNext(block->rpo_number());
}
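// A block whose single predecessor is also its immediate RPO predecessor is a
// plain fall-through boundary; ConnectRanges() already covers that case with
// an ordinary gap move, so no control-flow resolution is needed there.
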
namespace {

class LiveRangeBound {
 public:
  explicit LiveRangeBound(const LiveRange* range)
      : range_(range), start_(range->Start()), end_(range->End()) {
    DCHECK(!range->IsEmpty());
  }

  bool CanCover(LifetimePosition position) {
    return start_.Value() <= position.Value() &&
           position.Value() < end_.Value();
  }

  const LiveRange* const range_;
  const LifetimePosition start_;
  const LifetimePosition end_;

 private:
  DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
};


struct FindResult {
  const LiveRange* cur_cover_;
  const LiveRange* pred_cover_;
};


class LiveRangeBoundArray {
 public:
  LiveRangeBoundArray() : length_(0), start_(nullptr) {}

  bool ShouldInitialize() { return start_ == nullptr; }

  void Initialize(Zone* zone, const LiveRange* const range) {
    size_t length = 0;
    for (auto i = range; i != nullptr; i = i->next()) length++;
    start_ = zone->NewArray<LiveRangeBound>(length);
    length_ = length;
    auto curr = start_;
    for (auto i = range; i != nullptr; i = i->next(), ++curr) {
      new (curr) LiveRangeBound(i);
    }
  }
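
  // The bounds built in Initialize() form a sorted, non-overlapping sequence
  // of [start, end[ entries (one per split child of the live range), which is
  // what allows Find() below to use binary search.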
  LiveRangeBound* Find(const LifetimePosition position) const {
    size_t left_index = 0;
    size_t right_index = length_;
    while (true) {
      size_t current_index = left_index + (right_index - left_index) / 2;
      DCHECK(right_index > current_index);
      auto bound = &start_[current_index];
      if (bound->start_.Value() <= position.Value()) {
        if (position.Value() < bound->end_.Value()) return bound;
        DCHECK(left_index < current_index);
        left_index = current_index;
      } else {
        right_index = current_index;
      }
    }
  }

  LiveRangeBound* FindPred(const InstructionBlock* pred) {
    auto pred_end =
        LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
    return Find(pred_end);
  }

  LiveRangeBound* FindSucc(const InstructionBlock* succ) {
    auto succ_start = LifetimePosition::FromInstructionIndex(
        succ->first_instruction_index());
    return Find(succ_start);
  }

  void Find(const InstructionBlock* block, const InstructionBlock* pred,
            FindResult* result) const {
    auto pred_end =
        LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
    auto bound = Find(pred_end);
    result->pred_cover_ = bound->range_;
    auto cur_start = LifetimePosition::FromInstructionIndex(
        block->first_instruction_index());
    // Common case.
    if (bound->CanCover(cur_start)) {
      result->cur_cover_ = bound->range_;
      return;
    }
    result->cur_cover_ = Find(cur_start)->range_;
    DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
  }

 private:
  size_t length_;
  LiveRangeBound* start_;

  DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
};

class LiveRangeFinder {
 public:
  explicit LiveRangeFinder(const RegisterAllocator& allocator)
      : allocator_(allocator),
        bounds_length_(static_cast<int>(allocator.live_ranges().size())),
        bounds_(allocator.local_zone()->NewArray<LiveRangeBoundArray>(
            bounds_length_)) {
    for (int i = 0; i < bounds_length_; ++i) {
      new (&bounds_[i]) LiveRangeBoundArray();
    }
  }

  LiveRangeBoundArray* ArrayFor(int operand_index) {
    DCHECK(operand_index < bounds_length_);
    auto range = allocator_.live_ranges()[operand_index];
    DCHECK(range != nullptr && !range->IsEmpty());
    auto array = &bounds_[operand_index];
    if (array->ShouldInitialize()) {
      array->Initialize(allocator_.local_zone(), range);
    }
    return array;
  }

 private:
  const RegisterAllocator& allocator_;
  const int bounds_length_;
  LiveRangeBoundArray* const bounds_;

  DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
};

}  // namespace

void RegisterAllocator::ResolveControlFlow() {
  // Lazily linearize live ranges in memory for fast lookup.
  LiveRangeFinder finder(*this);
  for (auto block : code()->instruction_blocks()) {
    if (CanEagerlyResolveControlFlow(block)) continue;
    if (FLAG_turbo_delay_ssa_decon) {
      // Resolve phis.
      for (auto phi : block->phis()) {
        auto* block_bound =
            finder.ArrayFor(phi->virtual_register())->FindSucc(block);
        auto phi_output =
            block_bound->range_->GetAssignedOperand(operand_cache());
        phi->output().ConvertTo(phi_output->kind(), phi_output->index());
        size_t pred_index = 0;
        for (auto pred : block->predecessors()) {
          const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
          auto* pred_bound = finder.ArrayFor(phi->operands()[pred_index])
                                 ->FindPred(pred_block);
          auto pred_op =
              pred_bound->range_->GetAssignedOperand(operand_cache());
          phi->inputs()[pred_index] = *pred_op;
          ResolveControlFlow(block, phi_output, pred_block, pred_op);
          pred_index++;
        }
      }
    }
    auto live = live_in_sets_[block->rpo_number().ToInt()];
    BitVector::Iterator iterator(live);
    while (!iterator.Done()) {
      auto* array = finder.ArrayFor(iterator.Current());
      for (auto pred : block->predecessors()) {
        FindResult result;
        const auto* pred_block = code()->InstructionBlockAt(pred);
        array->Find(block, pred_block, &result);
        if (result.cur_cover_ == result.pred_cover_ ||
            result.cur_cover_->IsSpilled())
          continue;
        auto pred_op = result.pred_cover_->GetAssignedOperand(operand_cache());
        auto cur_op = result.cur_cover_->GetAssignedOperand(operand_cache());
        ResolveControlFlow(block, cur_op, pred_block, pred_op);
      }
      iterator.Advance();
    }
  }
}

void RegisterAllocator::ResolveControlFlow(const InstructionBlock* block,
                                           InstructionOperand* cur_op,
                                           const InstructionBlock* pred,
                                           InstructionOperand* pred_op) {
  if (pred_op->Equals(cur_op)) return;
  int gap_index;
  GapInstruction::InnerPosition position;
  if (block->PredecessorCount() == 1) {
    gap_index = block->first_instruction_index();
    position = GapInstruction::START;
  } else {
    DCHECK(pred->SuccessorCount() == 1);
    DCHECK(!InstructionAt(pred->last_instruction_index())->HasPointerMap());
    gap_index = pred->last_instruction_index() - 1;
    position = GapInstruction::END;
  }
  AddGapMove(gap_index, position, pred_op, cur_op);
}
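// The placement rule above relies on critical edges having been split: with a
// single predecessor the move can sit at the start of the block itself;
// otherwise the predecessor must have exactly one successor, and the move
// goes into the gap before its last instruction.
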
void RegisterAllocator::BuildLiveRanges() {
  // Process the blocks in reverse order.
  for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
       --block_id) {
    auto block = code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
    auto live = ComputeLiveOut(block);
    // Initially consider all live_out values live for the entire block. We
    // will shorten these intervals if necessary.
    AddInitialIntervals(block, live);

    // Process the instructions in reverse order, generating and killing
    // live values.
    ProcessInstructions(block, live);
    // All phi output operands are killed by this block.
    for (auto phi : block->phis()) {
      // The live range interval already ends at the first instruction of the
      // block.
      int phi_vreg = phi->virtual_register();
      live->Remove(phi_vreg);
      if (!FLAG_turbo_delay_ssa_decon) {
        InstructionOperand* hint = nullptr;
        InstructionOperand* phi_operand = nullptr;
        auto gap =
            GetLastGap(code()->InstructionBlockAt(block->predecessors()[0]));
        auto move =
            gap->GetOrCreateParallelMove(GapInstruction::END, code_zone());
        for (int j = 0; j < move->move_operands()->length(); ++j) {
          auto to = move->move_operands()->at(j).destination();
          if (to->IsUnallocated() &&
              UnallocatedOperand::cast(to)->virtual_register() == phi_vreg) {
            hint = move->move_operands()->at(j).source();
            phi_operand = to;
            break;
          }
        }
        DCHECK(hint != nullptr);
        auto block_start = LifetimePosition::FromInstructionIndex(
            block->first_instruction_index());
        Define(block_start, phi_operand, hint);
      }
    }

    // Now live is live_in for this block except not including values live
    // out on backward successor edges.
    live_in_sets_[block_id] = live;

    if (block->IsLoopHeader()) {
      // Add a live range stretching from the first loop instruction to the last
      // for each value live on entry to the header.
      BitVector::Iterator iterator(live);
      auto start = LifetimePosition::FromInstructionIndex(
          block->first_instruction_index());
      auto end = LifetimePosition::FromInstructionIndex(
                     code()->LastLoopInstructionIndex(block)).NextInstruction();
      while (!iterator.Done()) {
        int operand_index = iterator.Current();
        auto range = LiveRangeFor(operand_index);
        range->EnsureInterval(start, end, local_zone());
        iterator.Advance();
      }
      // Insert all values into the live in sets of all blocks in the loop.
      for (int i = block->rpo_number().ToInt() + 1;
           i < block->loop_end().ToInt(); ++i) {
        live_in_sets_[i]->Union(*live);
      }
    }
  }

  for (auto range : live_ranges()) {
    if (range == nullptr) continue;
    range->kind_ = RequiredRegisterKind(range->id());
    // Give slots to all ranges with a non fixed slot use.
    if (range->has_slot_use() && range->HasNoSpillType()) {
      AssignSpillRangeToLiveRange(range);
    }
    // TODO(bmeurer): This is a horrible hack to make sure that for constant
    // live ranges, every use requires the constant to be in a register.
    // Without this hack, all uses with "any" policy would get the constant
    // operand assigned.
    if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
      for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next_) {
        if (pos->type() == UsePositionType::kRequiresSlot) continue;
        UsePositionType new_type = UsePositionType::kAny;
        // Can't mark phis as needing a register.
        if (!code()
                 ->InstructionAt(pos->pos().InstructionIndex())
                 ->IsGapMoves()) {
          new_type = UsePositionType::kRequiresRegister;
        }
        pos->set_type(new_type, true);
      }
    }
  }
}

bool RegisterAllocator::ExistsUseWithoutDefinition() {
  bool found = false;
  BitVector::Iterator iterator(live_in_sets_[0]);
  while (!iterator.Done()) {
    found = true;
    int operand_index = iterator.Current();
    PrintF("Register allocator error: live v%d reached first block.\n",
           operand_index);
    LiveRange* range = LiveRangeFor(operand_index);
    PrintF("  (first use is at %d)\n", range->first_pos()->pos().Value());
    if (debug_name() == nullptr) {
      PrintF("\n");
    } else {
      PrintF("  (function: %s)\n", debug_name());
    }
    iterator.Advance();
  }
  return found;
}


bool RegisterAllocator::SafePointsAreInOrder() const {
  int safe_point = 0;
  for (auto map : *code()->pointer_maps()) {
    if (safe_point > map->instruction_position()) return false;
    safe_point = map->instruction_position();
  }
  return true;
}
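

// Record, for every safe point, the operands holding live tagged values so
// the GC can find and update them: a spilled range contributes its stack
// slot, and a range currently in a register contributes that register's
// operand.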
void RegisterAllocator::PopulatePointerMaps() {
  DCHECK(SafePointsAreInOrder());

  // Iterate over all safe point positions and record a pointer
  // for all spilled live ranges at this point.
  int last_range_start = 0;
  auto pointer_maps = code()->pointer_maps();
  PointerMapDeque::const_iterator first_it = pointer_maps->begin();
  for (LiveRange* range : live_ranges()) {
    if (range == nullptr) continue;
    // Iterate over the first parts of multi-part live ranges.
    if (range->IsChild()) continue;
    // Skip non-reference values.
    if (!HasTaggedValue(range->id())) continue;
    // Skip empty live ranges.
    if (range->IsEmpty()) continue;

    // Find the extent of the range and its children.
    int start = range->Start().InstructionIndex();
    int end = 0;
    for (auto cur = range; cur != nullptr; cur = cur->next()) {
      auto this_end = cur->End();
      if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex();
      DCHECK(cur->Start().InstructionIndex() >= start);
    }

    // Most of the ranges are in order, but not all.  Keep an eye on when they
    // step backwards and reset first_it so we don't miss any safe points.
    if (start < last_range_start) first_it = pointer_maps->begin();
    last_range_start = start;

    // Step across all the safe points that are before the start of this
    // range, recording how far we step in order to save doing this for the
    // next range.
    for (; first_it != pointer_maps->end(); ++first_it) {
      auto map = *first_it;
      if (map->instruction_position() >= start) break;
    }

    // Step through the safe points to see whether they are in the range.
    for (auto it = first_it; it != pointer_maps->end(); ++it) {
      auto map = *it;
      int safe_point = map->instruction_position();

      // The safe points are sorted so we can stop searching here.
      if (safe_point - 1 > end) break;

      // Advance to the next active range that covers the current
      // safe point position.
      auto safe_point_pos = LifetimePosition::FromInstructionIndex(safe_point);
      auto cur = range;
      while (cur != nullptr && !cur->Covers(safe_point_pos)) {
        cur = cur->next();
      }
      if (cur == nullptr) continue;

      // Check if the live range is spilled and the safe point is after
      // the spill position.
      if (range->HasSpillOperand() &&
          safe_point >= range->spill_start_index() &&
          !range->GetSpillOperand()->IsConstant()) {
        TRACE("Pointer for range %d (spilled at %d) at safe point %d\n",
              range->id(), range->spill_start_index(), safe_point);
        map->RecordPointer(range->GetSpillOperand(), code_zone());
      }

      if (!cur->IsSpilled()) {
        TRACE(
            "Pointer in register for range %d (start at %d) "
            "at safe point %d\n",
            cur->id(), cur->Start().Value(), safe_point);
        InstructionOperand* operand = cur->GetAssignedOperand(operand_cache());
        DCHECK(!operand->IsStackSlot());
        map->RecordPointer(operand, code_zone());
      }
    }
  }
}
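

// The allocator makes two passes over the same machinery, one per register
// file; mode_ and num_registers_ select which registers AllocateRegisters()
// hands out.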
void RegisterAllocator::AllocateGeneralRegisters() {
  num_registers_ = config()->num_general_registers();
  mode_ = GENERAL_REGISTERS;
  AllocateRegisters();
}


void RegisterAllocator::AllocateDoubleRegisters() {
  num_registers_ = config()->num_aliased_double_registers();
  mode_ = DOUBLE_REGISTERS;
  AllocateRegisters();
}


void RegisterAllocator::AllocateRegisters() {
  DCHECK(unhandled_live_ranges().empty());

  for (auto range : live_ranges()) {
    if (range == nullptr) continue;
    if (range->Kind() == mode_) {
      AddToUnhandledUnsorted(range);
    }
  }
  SortUnhandled();
  DCHECK(UnhandledIsSorted());

  DCHECK(active_live_ranges().empty());
  DCHECK(inactive_live_ranges().empty());

  if (mode_ == DOUBLE_REGISTERS) {
    for (int i = 0; i < config()->num_aliased_double_registers(); ++i) {
      auto current = fixed_double_live_ranges()[i];
      if (current != nullptr) {
        AddToInactive(current);
      }
    }
  } else {
    DCHECK(mode_ == GENERAL_REGISTERS);
    for (auto current : fixed_live_ranges()) {
      if (current != nullptr) {
        AddToInactive(current);
      }
    }
  }
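
  // Main linear-scan loop: take the unhandled range with the lowest start
  // position, retire active/inactive ranges that ended before that position,
  // and try to assign the range a register.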
  while (!unhandled_live_ranges().empty()) {
    DCHECK(UnhandledIsSorted());
    auto current = unhandled_live_ranges().back();
    unhandled_live_ranges().pop_back();
    DCHECK(UnhandledIsSorted());
    auto position = current->Start();
#ifdef DEBUG
    allocation_finger_ = position;
#endif
    TRACE("Processing interval %d start=%d\n", current->id(), position.Value());

    if (!current->HasNoSpillType()) {
      TRACE("Live range %d already has a spill operand\n", current->id());
      auto next_pos = position;
      if (code()->IsGapAt(next_pos.InstructionIndex())) {
        next_pos = next_pos.NextInstruction();
      }
      auto pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
      // If the range already has a spill operand and it doesn't need a
      // register immediately, split it and spill the first part of the range.
      if (pos == nullptr) {
        Spill(current);
        continue;
      } else if (pos->pos().Value() >
                 current->Start().NextInstruction().Value()) {
        // Do not spill the live range eagerly if the use position that can
        // benefit from the register is too close to the start of the range.
        SpillBetween(current, current->Start(), pos->pos());
        DCHECK(UnhandledIsSorted());
        continue;
      }
    }

    if (TryReuseSpillForPhi(current)) continue;

    for (size_t i = 0; i < active_live_ranges().size(); ++i) {
      auto cur_active = active_live_ranges()[i];
      if (cur_active->End().Value() <= position.Value()) {
        ActiveToHandled(cur_active);
        --i;  // The live range was removed from the list of active live ranges.
      } else if (!cur_active->Covers(position)) {
        ActiveToInactive(cur_active);
        --i;  // The live range was removed from the list of active live ranges.
      }
    }

    for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
      auto cur_inactive = inactive_live_ranges()[i];
      if (cur_inactive->End().Value() <= position.Value()) {
        InactiveToHandled(cur_inactive);
        --i;  // Live range was removed from the list of inactive live ranges.
      } else if (cur_inactive->Covers(position)) {
        InactiveToActive(cur_inactive);
        --i;  // Live range was removed from the list of inactive live ranges.
      }
    }

    DCHECK(!current->HasRegisterAssigned() && !current->IsSpilled());

    bool result = TryAllocateFreeReg(current);
    if (!result) AllocateBlockedReg(current);
    if (current->HasRegisterAssigned()) {
      AddToActive(current);
    }
  }

  active_live_ranges().clear();
  inactive_live_ranges().clear();
}


const char* RegisterAllocator::RegisterName(int allocation_index) {
  if (mode_ == GENERAL_REGISTERS) {
    return config()->general_register_name(allocation_index);
  } else {
    return config()->double_register_name(allocation_index);
  }
}


bool RegisterAllocator::HasTaggedValue(int virtual_register) const {
  return code()->IsReference(virtual_register);
}


RegisterKind RegisterAllocator::RequiredRegisterKind(
    int virtual_register) const {
  return (code()->IsDouble(virtual_register)) ? DOUBLE_REGISTERS
                                              : GENERAL_REGISTERS;
}


void RegisterAllocator::AddToActive(LiveRange* range) {
  TRACE("Add live range %d to active\n", range->id());
  active_live_ranges().push_back(range);
}


void RegisterAllocator::AddToInactive(LiveRange* range) {
  TRACE("Add live range %d to inactive\n", range->id());
  inactive_live_ranges().push_back(range);
}


void RegisterAllocator::AddToUnhandledSorted(LiveRange* range) {
  if (range == nullptr || range->IsEmpty()) return;
  DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
  DCHECK(allocation_finger_.Value() <= range->Start().Value());
  for (int i = static_cast<int>(unhandled_live_ranges().size() - 1); i >= 0;
       --i) {
    auto cur_range = unhandled_live_ranges().at(i);
    if (!range->ShouldBeAllocatedBefore(cur_range)) continue;
    TRACE("Add live range %d to unhandled at %d\n", range->id(), i + 1);
    auto it = unhandled_live_ranges().begin() + (i + 1);
    unhandled_live_ranges().insert(it, range);
    DCHECK(UnhandledIsSorted());
    return;
  }
  TRACE("Add live range %d to unhandled at start\n", range->id());
  unhandled_live_ranges().insert(unhandled_live_ranges().begin(), range);
  DCHECK(UnhandledIsSorted());
}


void RegisterAllocator::AddToUnhandledUnsorted(LiveRange* range) {
  if (range == nullptr || range->IsEmpty()) return;
  DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
  TRACE("Add live range %d to unhandled unsorted at end\n", range->id());
  unhandled_live_ranges().push_back(range);
}


static bool UnhandledSortHelper(LiveRange* a, LiveRange* b) {
  DCHECK(!a->ShouldBeAllocatedBefore(b) || !b->ShouldBeAllocatedBefore(a));
  if (a->ShouldBeAllocatedBefore(b)) return false;
  if (b->ShouldBeAllocatedBefore(a)) return true;
  return a->id() < b->id();
}


// Sort the unhandled live ranges so that the ranges to be processed first are
// at the end of the list.  This is convenient for the register allocation
// algorithm because it is efficient to remove elements from the end.
void RegisterAllocator::SortUnhandled() {
  TRACE("Sort unhandled\n");
  std::sort(unhandled_live_ranges().begin(), unhandled_live_ranges().end(),
            &UnhandledSortHelper);
}


bool RegisterAllocator::UnhandledIsSorted() {
  size_t len = unhandled_live_ranges().size();
  for (size_t i = 1; i < len; i++) {
    auto a = unhandled_live_ranges().at(i - 1);
    auto b = unhandled_live_ranges().at(i);
    if (a->Start().Value() < b->Start().Value()) return false;
  }
  return true;
}


void RegisterAllocator::ActiveToHandled(LiveRange* range) {
  RemoveElement(&active_live_ranges(), range);
  TRACE("Moving live range %d from active to handled\n", range->id());
}


void RegisterAllocator::ActiveToInactive(LiveRange* range) {
  RemoveElement(&active_live_ranges(), range);
  inactive_live_ranges().push_back(range);
  TRACE("Moving live range %d from active to inactive\n", range->id());
}


void RegisterAllocator::InactiveToHandled(LiveRange* range) {
  RemoveElement(&inactive_live_ranges(), range);
  TRACE("Moving live range %d from inactive to handled\n", range->id());
}


void RegisterAllocator::InactiveToActive(LiveRange* range) {
  RemoveElement(&inactive_live_ranges(), range);
  active_live_ranges().push_back(range);
  TRACE("Moving live range %d from inactive to active\n", range->id());
}
bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) {
  LifetimePosition free_until_pos[RegisterConfiguration::kMaxDoubleRegisters];

  for (int i = 0; i < num_registers_; i++) {
    free_until_pos[i] = LifetimePosition::MaxPosition();
  }

  for (auto cur_active : active_live_ranges()) {
    free_until_pos[cur_active->assigned_register()] =
        LifetimePosition::FromInstructionIndex(0);
  }

  for (auto cur_inactive : inactive_live_ranges()) {
    DCHECK(cur_inactive->End().Value() > current->Start().Value());
    auto next_intersection = cur_inactive->FirstIntersection(current);
    if (!next_intersection.IsValid()) continue;
    int cur_reg = cur_inactive->assigned_register();
    free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
  }

  auto hint = current->FirstHint();
  if (hint != nullptr && (hint->IsRegister() || hint->IsDoubleRegister())) {
    int register_index = hint->index();
    TRACE("Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
          RegisterName(register_index), free_until_pos[register_index].Value(),
          current->id(), current->End().Value());

    // The desired register is free until the end of the current live range.
    if (free_until_pos[register_index].Value() >= current->End().Value()) {
      TRACE("Assigning preferred reg %s to live range %d\n",
            RegisterName(register_index), current->id());
      SetLiveRangeAssignedRegister(current, register_index);
      return true;
    }
  }

  // Find the register which stays free for the longest time.
  int reg = 0;
  for (int i = 1; i < RegisterCount(); ++i) {
    if (free_until_pos[i].Value() > free_until_pos[reg].Value()) {
      reg = i;
    }
  }

  auto pos = free_until_pos[reg];

  if (pos.Value() <= current->Start().Value()) {
    // All registers are blocked.
    return false;
  }

  if (pos.Value() < current->End().Value()) {
    // Register reg is available at the range start but becomes blocked before
    // the range end.  Split current at the position where it becomes blocked.
    auto tail = SplitRangeAt(current, pos);
    AddToUnhandledSorted(tail);
  }

  // Register reg is available at the range start and is free until
  // the range end.
  DCHECK(pos.Value() >= current->End().Value());
  TRACE("Assigning free reg %s to live range %d\n", RegisterName(reg),
        current->id());
  SetLiveRangeAssignedRegister(current, reg);

  return true;
}
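

// No register is free for all of |current|.  Choose the register whose next
// use is furthest away, then either spill the head of |current| or evict the
// competing ranges; block_pos tracks where a register is blocked by a fixed
// or unspillable range and thus cannot be freed by spilling.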
void RegisterAllocator::AllocateBlockedReg(LiveRange* current) {
  auto register_use = current->NextRegisterPosition(current->Start());
  if (register_use == nullptr) {
    // There is no use in the current live range that requires a register.
    // We can just spill it.
    Spill(current);
    return;
  }

  LifetimePosition use_pos[RegisterConfiguration::kMaxDoubleRegisters];
  LifetimePosition block_pos[RegisterConfiguration::kMaxDoubleRegisters];

  for (int i = 0; i < num_registers_; i++) {
    use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
  }

  for (auto range : active_live_ranges()) {
    int cur_reg = range->assigned_register();
    if (range->IsFixed() || !range->CanBeSpilled(current->Start())) {
      block_pos[cur_reg] = use_pos[cur_reg] =
          LifetimePosition::FromInstructionIndex(0);
    } else {
      auto next_use =
          range->NextUsePositionRegisterIsBeneficial(current->Start());
      if (next_use == nullptr) {
        use_pos[cur_reg] = range->End();
      } else {
        use_pos[cur_reg] = next_use->pos();
      }
    }
  }

  for (auto range : inactive_live_ranges()) {
    DCHECK(range->End().Value() > current->Start().Value());
    auto next_intersection = range->FirstIntersection(current);
    if (!next_intersection.IsValid()) continue;
    int cur_reg = range->assigned_register();
    if (range->IsFixed()) {
      block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
      use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
    } else {
      use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
    }
  }

  int reg = 0;
  for (int i = 1; i < RegisterCount(); ++i) {
    if (use_pos[i].Value() > use_pos[reg].Value()) {
      reg = i;
    }
  }

  auto pos = use_pos[reg];

  if (pos.Value() < register_use->pos().Value()) {
    // All registers are blocked before the first use that requires a register.
    // Spill the starting part of the live range up to that use.
    SpillBetween(current, current->Start(), register_use->pos());
    return;
  }

  if (block_pos[reg].Value() < current->End().Value()) {
    // Register becomes blocked before the current range end.  Split before
    // that position.
    LiveRange* tail = SplitBetween(current, current->Start(),
                                   block_pos[reg].InstructionStart());
    AddToUnhandledSorted(tail);
  }

  // Register reg is not blocked for the whole range.
  DCHECK(block_pos[reg].Value() >= current->End().Value());
  TRACE("Assigning blocked reg %s to live range %d\n", RegisterName(reg),
        current->id());
  SetLiveRangeAssignedRegister(current, reg);

  // This register was not free.  Thus we need to find and spill
  // parts of active and inactive live regions that use the same register
  // at the same lifetime positions as current.
  SplitAndSpillIntersecting(current);
}


static const InstructionBlock* GetContainingLoop(
    const InstructionSequence* sequence, const InstructionBlock* block) {
  auto index = block->loop_header();
  if (!index.IsValid()) return nullptr;
  return sequence->InstructionBlockAt(index);
}
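

// When a range must be spilled inside a loop, prefer to move the spill up to
// the loop header (or to an enclosing loop's header): spilling once before
// the loop is cheaper than re-spilling on every back edge.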
LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
    LiveRange* range, LifetimePosition pos) {
  auto block = GetInstructionBlock(pos.InstructionStart());
  auto loop_header =
      block->IsLoopHeader() ? block : GetContainingLoop(code(), block);

  if (loop_header == nullptr) return pos;

  auto prev_use = range->PreviousUsePositionRegisterIsBeneficial(pos);

  while (loop_header != nullptr) {
    // We are going to spill the live range inside the loop.  If possible,
    // try to move the spilling position backwards to the loop header: this
    // will reduce the number of memory moves on the back edge.
    auto loop_start = LifetimePosition::FromInstructionIndex(
        loop_header->first_instruction_index());

    if (range->Covers(loop_start)) {
      if (prev_use == nullptr || prev_use->pos().Value() < loop_start.Value()) {
        // No register-beneficial use inside the loop before the pos.
        pos = loop_start;
      }
    }

    // Try hoisting out to an outer loop.
    loop_header = GetContainingLoop(code(), loop_header);
  }

  return pos;
}
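

// |current| has just been assigned a register that overlapping active and
// inactive ranges still occupy.  Split and spill those ranges where they
// intersect |current| so that the register is exclusively |current|'s.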
void RegisterAllocator::SplitAndSpillIntersecting(LiveRange* current) {
  DCHECK(current->HasRegisterAssigned());
  int reg = current->assigned_register();
  auto split_pos = current->Start();
  for (size_t i = 0; i < active_live_ranges().size(); ++i) {
    auto range = active_live_ranges()[i];
    if (range->assigned_register() == reg) {
      auto next_pos = range->NextRegisterPosition(current->Start());
      auto spill_pos = FindOptimalSpillingPos(range, split_pos);
      if (next_pos == nullptr) {
        SpillAfter(range, spill_pos);
      } else {
        // When spilling between spill_pos and next_pos ensure that the range
        // remains spilled at least until the start of the current live range.
        // This guarantees that we will not introduce new unhandled ranges that
        // start before the current range, as this violates the allocation
        // invariant and will lead to an inconsistent state of active and
        // inactive live-ranges: ranges are allocated in order of their start
        // positions, and ranges are retired from active/inactive when the
        // start of the current live-range is larger than their end.
        SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
      }
      ActiveToHandled(range);
      --i;
    }
  }

  for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
    auto range = inactive_live_ranges()[i];
    DCHECK(range->End().Value() > current->Start().Value());
    if (range->assigned_register() == reg && !range->IsFixed()) {
      LifetimePosition next_intersection = range->FirstIntersection(current);
      if (next_intersection.IsValid()) {
        UsePosition* next_pos = range->NextRegisterPosition(current->Start());
        if (next_pos == nullptr) {
          SpillAfter(range, split_pos);
        } else {
          next_intersection = Min(next_intersection, next_pos->pos());
          SpillBetween(range, split_pos, next_intersection);
        }
        InactiveToHandled(range);
        --i;
      }
    }
  }
}


bool RegisterAllocator::IsBlockBoundary(LifetimePosition pos) {
  return pos.IsInstructionStart() &&
         code()->GetInstructionBlock(pos.InstructionIndex())->code_start() ==
             pos.InstructionIndex();
}


LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
                                           LifetimePosition pos) {
  DCHECK(!range->IsFixed());
  TRACE("Splitting live range %d at %d\n", range->id(), pos.Value());

  if (pos.Value() <= range->Start().Value()) return range;

  // We can't properly connect live ranges if splitting occurred at the end
  // of a control instruction.
  DCHECK(pos.IsInstructionStart() ||
         (code()->GetInstructionBlock(pos.InstructionIndex()))
                 ->last_instruction_index() != pos.InstructionIndex());

  int vreg = GetVirtualRegister();
  auto result = LiveRangeFor(vreg);
  range->SplitAt(pos, result, local_zone());
  return result;
}


LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
                                           LifetimePosition start,
                                           LifetimePosition end) {
  DCHECK(!range->IsFixed());
  TRACE("Splitting live range %d in position between [%d, %d]\n", range->id(),
        start.Value(), end.Value());

  auto split_pos = FindOptimalSplitPos(start, end);
  DCHECK(split_pos.Value() >= start.Value());
  return SplitRangeAt(range, split_pos);
}
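

// Choose a split position within [start, end]: if the interval crosses a
// loop boundary, prefer the header of the outermost loop entered between
// start and end, so the split (and any spill code) stays out of loop bodies.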
LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
                                                        LifetimePosition end) {
  int start_instr = start.InstructionIndex();
  int end_instr = end.InstructionIndex();
  DCHECK(start_instr <= end_instr);

  // We have no choice.
  if (start_instr == end_instr) return end;

  auto start_block = GetInstructionBlock(start);
  auto end_block = GetInstructionBlock(end);

  if (end_block == start_block) {
    // The interval is split in the same basic block.  Split at the latest
    // possible position.
    return end;
  }

  auto block = end_block;
  // Find header of outermost loop.
  // TODO(titzer): fix redundancy below.
  while (GetContainingLoop(code(), block) != nullptr &&
         GetContainingLoop(code(), block)->rpo_number().ToInt() >
             start_block->rpo_number().ToInt()) {
    block = GetContainingLoop(code(), block);
  }

  // We did not find any suitable outer loop.  Split at the latest possible
  // position unless end_block is a loop header itself.
  if (block == end_block && !end_block->IsLoopHeader()) return end;

  return LifetimePosition::FromInstructionIndex(
      block->first_instruction_index());
}


void RegisterAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
  auto second_part = SplitRangeAt(range, pos);
  Spill(second_part);
}


void RegisterAllocator::SpillBetween(LiveRange* range, LifetimePosition start,
                                     LifetimePosition end) {
  SpillBetweenUntil(range, start, start, end);
}
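

// Split |range| at |start|.  If the resulting tail still intersects
// [start, end[, split it once more no earlier than |until|, spill the middle
// piece, and requeue the final piece so it can get a register later.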
void RegisterAllocator::SpillBetweenUntil(LiveRange* range,
                                          LifetimePosition start,
                                          LifetimePosition until,
                                          LifetimePosition end) {
  CHECK(start.Value() < end.Value());
  auto second_part = SplitRangeAt(range, start);

  if (second_part->Start().Value() < end.Value()) {
    // The split result intersects with [start, end[.
    // Split it at a position between ]start+1, end[, spill the middle part
    // and put the rest to unhandled.
    auto third_part_end = end.PrevInstruction().InstructionEnd();
    if (IsBlockBoundary(end.InstructionStart())) {
      third_part_end = end.InstructionStart();
    }
    auto third_part = SplitBetween(
        second_part, Max(second_part->Start().InstructionEnd(), until),
        third_part_end);

    DCHECK(third_part != second_part);

    Spill(second_part);
    AddToUnhandledSorted(third_part);
  } else {
    // The split result does not intersect with [start, end[.
    // Nothing to spill.  Just put it to unhandled as a whole.
    AddToUnhandledSorted(second_part);
  }
}


void RegisterAllocator::Spill(LiveRange* range) {
  DCHECK(!range->IsSpilled());
  TRACE("Spilling live range %d\n", range->id());
  auto first = range->TopLevel();
  if (first->HasNoSpillType()) {
    AssignSpillRangeToLiveRange(first);
  }
  range->MakeSpilled();
}


int RegisterAllocator::RegisterCount() const { return num_registers_; }


#ifdef DEBUG

void RegisterAllocator::Verify() const {
  for (auto current : live_ranges()) {
    if (current != nullptr) current->Verify();
  }
}

#endif


void RegisterAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
                                                     int reg) {
  if (range->Kind() == DOUBLE_REGISTERS) {
    assigned_double_registers_->Add(reg);
  } else {
    DCHECK(range->Kind() == GENERAL_REGISTERS);
    assigned_registers_->Add(reg);
  }
  range->set_assigned_register(reg, operand_cache());
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8