1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "src/compiler/linkage.h"
6 #include "src/compiler/register-allocator.h"
7 #include "src/string-stream.h"
// Returns the earlier of two lifetime positions (compared by Value()).
// NOTE(review): listing is truncated — the closing brace is not visible here.
13 static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
14 return a.Value() < b.Value() ? a : b;
// Returns the later of two lifetime positions (compared by Value()).
18 static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) {
19 return a.Value() > b.Value() ? a : b;
// printf-style debug tracing for the register allocator; output is produced
// only when --trace_alloc is enabled.
// NOTE(review): listing is truncated — the `va_list arguments;` declaration
// and the matching va_end/closing braces are not visible here.
23 static void TraceAlloc(const char* msg, ...) {
24 if (FLAG_trace_alloc) {
26 va_start(arguments, msg);
27 base::OS::VPrint(msg, arguments);
// Removes |range| from |v|. The range is required to be present (DCHECK);
// the actual erase is on a line not visible in this truncated listing.
33 static void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) {
34 auto it = std::find(v->begin(), v->end(), range);
35 DCHECK(it != v->end());
// Constructs a use position at |pos| for |operand|, optionally carrying a
// |hint| operand. For unallocated operands the register requirements are
// derived from the operand's policy: a register policy makes the use require
// a register, and anything other than the "any" policy makes a register
// beneficial.
// NOTE(review): listing is truncated — part of the initializer list (pos_,
// operand_, hint_, requires_reg_ initialization) is not visible here.
40 UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
41 InstructionOperand* hint)
47 register_beneficial_(true) {
48 if (operand_ != nullptr && operand_->IsUnallocated()) {
49 const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_);
50 requires_reg_ = unalloc->HasRegisterPolicy();
51 register_beneficial_ = !unalloc->HasAnyPolicy();
53 DCHECK(pos_.IsValid());
// A hint is only usable once it has been resolved to a concrete operand,
// i.e. it exists and is no longer unallocated.
57 bool UsePosition::HasHint() const {
58 return hint_ != nullptr && !hint_->IsUnallocated();
// Accessor: true if this use must be in a register (set in the constructor
// from the operand's register policy).
62 bool UsePosition::RequiresRegister() const { return requires_reg_; }
// Accessor: true if placing this use in a register would help (any policy
// other than "any"; see the constructor).
65 bool UsePosition::RegisterIsBeneficial() const { return register_beneficial_; }
// Splits this interval in two at |pos|, which must lie strictly inside it.
// A new zone-allocated interval [pos, end_) becomes the tail; the linking of
// the tail into the list happens on lines not visible in this listing.
68 void UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
69 DCHECK(Contains(pos) && pos.Value() != start().Value());
70 auto after = new (zone) UseInterval(pos, end_);
// Singly-linked list node recording a deferred "spill at definition": the
// gap index where the spill move must be inserted and the operand to spill
// from. Consumed later by CommitSpillsAtDefinition.
// NOTE(review): listing is truncated — the `int const gap_index;` member and
// the closing brace are not visible here.
77 struct LiveRange::SpillAtDefinitionList : ZoneObject {
78 SpillAtDefinitionList(int gap_index, InstructionOperand* operand,
79 SpillAtDefinitionList* next)
80 : gap_index(gap_index), operand(operand), next(next) {}
82 InstructionOperand* const operand;
83 SpillAtDefinitionList* const next;
// Debug check: every use position of this range must lie within
// [Start(), End()].
90 void LiveRange::Verify() const {
91 UsePosition* cur = first_pos_;
92 while (cur != nullptr) {
93 DCHECK(Start().Value() <= cur->pos().Value() &&
94 cur->pos().Value() <= End().Value());
// Returns true if |target| overlaps any of this range's use intervals.
100 bool LiveRange::HasOverlap(UseInterval* target) const {
101 UseInterval* current_interval = first_interval_;
102 while (current_interval != nullptr) {
103 // Intervals overlap if the start of one is contained in the other.
104 if (current_interval->Contains(target->start()) ||
105 target->Contains(current_interval->start())) {
108 current_interval = current_interval->next();
// Constructs an empty, unallocated live range with the given id. All list
// heads and cached iteration state start out null; no spill decision has
// been made yet (SpillType::kNoSpillType).
// NOTE(review): listing is truncated — several initializers (id_, spilled_,
// first_pos_, parent_, next_, ...) are not visible here.
117 LiveRange::LiveRange(int id, Zone* zone)
121 is_non_loop_phi_(false),
122 kind_(UNALLOCATED_REGISTERS),
123 assigned_register_(kInvalidAssignment),
124 last_interval_(nullptr),
125 first_interval_(nullptr),
129 current_interval_(nullptr),
130 last_processed_use_(nullptr),
131 current_hint_operand_(nullptr),
132 spill_start_index_(kMaxInt),
133 spill_type_(SpillType::kNoSpillType),
134 spill_operand_(nullptr),
135 spills_at_definition_(nullptr) {}
// Commits register |reg| to this range (which must not already have a
// register or be spilled) and immediately rewrites all use positions to the
// cached operand for that register.
138 void LiveRange::set_assigned_register(int reg,
139 InstructionOperandCache* operand_cache) {
140 DCHECK(!HasRegisterAssigned() && !IsSpilled());
141 assigned_register_ = reg;
142 // TODO(dcarney): stop aliasing hint operands.
143 ConvertUsesToOperand(GetAssignedOperand(operand_cache));
// Marks this range as spilled. A spill destination must already have been
// chosen on the top-level range; any assigned register is dropped.
147 void LiveRange::MakeSpilled() {
148 DCHECK(!IsSpilled());
149 DCHECK(!TopLevel()->HasNoSpillType());
151 assigned_register_ = kInvalidAssignment;
// Records (but does not yet emit) a spill of |operand| at the gap with index
// |gap_index|, by prepending a node to the spills_at_definition_ list. The
// actual gap moves are emitted later by CommitSpillsAtDefinition.
155 void LiveRange::SpillAtDefinition(Zone* zone, int gap_index,
156 InstructionOperand* operand) {
157 DCHECK(HasNoSpillType());
158 spills_at_definition_ = new (zone)
159 SpillAtDefinitionList(gap_index, operand, spills_at_definition_);
// Emits the deferred spill moves recorded by SpillAtDefinition: for each
// recorded gap, adds a parallel move (at the gap's START position) from the
// recorded operand to the final spill operand |op|, then clears the list so
// the spills are committed only once.
163 void LiveRange::CommitSpillsAtDefinition(InstructionSequence* sequence,
164 InstructionOperand* op) {
165 auto to_spill = TopLevel()->spills_at_definition_;
166 if (to_spill == nullptr) return;
167 auto zone = sequence->zone();
168 for (; to_spill != nullptr; to_spill = to_spill->next) {
169 auto gap = sequence->GapAt(to_spill->gap_index);
170 auto move = gap->GetOrCreateParallelMove(GapInstruction::START, zone);
171 move->AddMove(to_spill->operand, op, zone);
173 TopLevel()->spills_at_definition_ = nullptr;
// Assigns a concrete (already-allocated) spill operand to this range. Only
// legal while no spill decision has been made yet.
177 void LiveRange::SetSpillOperand(InstructionOperand* operand) {
178 DCHECK(HasNoSpillType());
179 DCHECK(!operand->IsUnallocated());
180 spill_type_ = SpillType::kSpillOperand;
181 spill_operand_ = operand;
// Associates this range with a SpillRange (a slot shared among merged
// ranges); may replace a previously-set spill range but never a concrete
// spill operand.
185 void LiveRange::SetSpillRange(SpillRange* spill_range) {
186 DCHECK(HasNoSpillType() || HasSpillRange());
188 spill_type_ = SpillType::kSpillRange;
189 spill_range_ = spill_range;
// Finalizes a spill-range spill by replacing it with the concrete stack
// operand allocated for that spill range (called from SpillRange::SetOperand).
193 void LiveRange::CommitSpillOperand(InstructionOperand* operand) {
194 DCHECK(HasSpillRange());
195 DCHECK(!operand->IsUnallocated());
197 spill_type_ = SpillType::kSpillOperand;
198 spill_operand_ = operand;
// Returns the first use position at or after |start|. Scans forward from the
// cached last_processed_use_ marker (positions are visited in increasing
// order during allocation, so the cache avoids rescanning from the head).
202 UsePosition* LiveRange::NextUsePosition(LifetimePosition start) {
203 UsePosition* use_pos = last_processed_use_;
204 if (use_pos == nullptr) use_pos = first_pos();
205 while (use_pos != nullptr && use_pos->pos().Value() < start.Value()) {
206 use_pos = use_pos->next();
208 last_processed_use_ = use_pos;
// Returns the first use at or after |start| for which a register would be
// beneficial (skipping "any policy" uses).
213 UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
214 LifetimePosition start) {
215 UsePosition* pos = NextUsePosition(start);
216 while (pos != nullptr && !pos->RegisterIsBeneficial()) {
// Returns the last register-beneficial use strictly before |start|, or
// nullptr if there is none. Linear scan from the head of the use list.
223 UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
224 LifetimePosition start) {
225 auto pos = first_pos();
226 UsePosition* prev = nullptr;
227 while (pos != nullptr && pos->pos().Value() < start.Value()) {
228 if (pos->RegisterIsBeneficial()) prev = pos;
// Returns the first use at or after |start| that strictly requires a
// register (skipping uses that merely benefit from one).
235 UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) {
236 UsePosition* pos = NextUsePosition(start);
237 while (pos != nullptr && !pos->RequiresRegister()) {
// True if the range can be spilled at |pos|: the next use that requires a
// register must be later than the end of the following instruction.
244 bool LiveRange::CanBeSpilled(LifetimePosition pos) {
245 // We cannot spill a live range that has a use requiring a register
246 // at the current or the immediate next position.
247 auto use_pos = NextRegisterPosition(pos);
248 if (use_pos == nullptr) return true;
249 return use_pos->pos().Value() >
250 pos.NextInstruction().InstructionEnd().Value();
// Returns the operand this range was assigned to: a cached register operand
// (general or double, depending on kind) if a register was assigned, else
// the top-level range's spill operand.
// NOTE(review): listing is truncated — the `switch (Kind())` header, default
// case, and return of the spill operand are not visible here.
254 InstructionOperand* LiveRange::GetAssignedOperand(
255 InstructionOperandCache* cache) const {
256 if (HasRegisterAssigned()) {
257 DCHECK(!IsSpilled());
259 case GENERAL_REGISTERS:
260 return cache->RegisterOperand(assigned_register());
261 case DOUBLE_REGISTERS:
262 return cache->DoubleRegisterOperand(assigned_register());
268 DCHECK(!HasRegisterAssigned());
269 auto op = TopLevel()->GetSpillOperand();
270 DCHECK(!op->IsUnallocated());
// By-value variant of GetAssignedOperand above: builds the register operand
// directly instead of consulting the cache; otherwise identical logic.
// NOTE(review): listing is truncated — the switch header and the final
// return of the spill operand are not visible here.
275 InstructionOperand LiveRange::GetAssignedOperand() const {
276 if (HasRegisterAssigned()) {
277 DCHECK(!IsSpilled());
279 case GENERAL_REGISTERS:
280 return RegisterOperand(assigned_register());
281 case DOUBLE_REGISTERS:
282 return DoubleRegisterOperand(assigned_register());
288 DCHECK(!HasRegisterAssigned());
289 auto op = TopLevel()->GetSpillOperand();
290 DCHECK(!op->IsUnallocated());
// Returns the interval from which a search for |position| should start:
// the cached current_interval_ if it does not start after |position|,
// otherwise the head of the interval list (and the cache is invalidated,
// since intervals can only be walked forward from it).
295 UseInterval* LiveRange::FirstSearchIntervalForPosition(
296 LifetimePosition position) const {
297 if (current_interval_ == nullptr) return first_interval_;
298 if (current_interval_->start().Value() > position.Value()) {
299 current_interval_ = nullptr;
300 return first_interval_;
302 return current_interval_;
// Advances the cached search marker (current_interval_) to |to_start_of|,
// but only if that interval starts at or before |but_not_past| and strictly
// after the current marker — i.e. the cache only ever moves forward.
306 void LiveRange::AdvanceLastProcessedMarker(
307 UseInterval* to_start_of, LifetimePosition but_not_past) const {
308 if (to_start_of == nullptr) return;
309 if (to_start_of->start().Value() > but_not_past.Value()) return;
310 auto start = current_interval_ == nullptr ? LifetimePosition::Invalid()
311 : current_interval_->start();
312 if (to_start_of->start().Value() > start.Value()) {
313 current_interval_ = to_start_of;
// Splits this live range at |position|, moving everything at/after the split
// into |result| (which must be empty). Both the use-interval list and the
// use-position list are partitioned; cached iteration state is discarded and
// |result| is linked into the parent/next chain as a child of this range's
// top level.
// NOTE(review): listing is truncated — the Zone* parameter line, several
// loop-exit/brace lines, and the tail of the function are not visible here.
318 void LiveRange::SplitAt(LifetimePosition position, LiveRange* result,
320 DCHECK(Start().Value() < position.Value());
321 DCHECK(result->IsEmpty());
322 // Find the last interval that ends before the position. If the
323 // position is contained in one of the intervals in the chain, we
324 // split that interval and use the first part.
325 auto current = FirstSearchIntervalForPosition(position);
327 // If the split position coincides with the beginning of a use interval
328 // we need to split use positons in a special way.
329 bool split_at_start = false;
331 if (current->start().Value() == position.Value()) {
332 // When splitting at start we need to locate the previous use interval.
333 current = first_interval_;
336 while (current != nullptr) {
337 if (current->Contains(position)) {
338 current->SplitAt(position, zone);
341 auto next = current->next();
342 if (next->start().Value() >= position.Value()) {
343 split_at_start = (next->start().Value() == position.Value());
349 // Partition original use intervals to the two live ranges.
350 auto before = current;
351 auto after = before->next();
352 result->last_interval_ =
353 (last_interval_ == before)
354 ? after // Only interval in the range after split.
355 : last_interval_; // Last interval of the original range.
356 result->first_interval_ = after;
357 last_interval_ = before;
359 // Find the last use position before the split and the first use
360 // position after it.
361 auto use_after = first_pos_;
362 UsePosition* use_before = nullptr;
363 if (split_at_start) {
364 // The split position coincides with the beginning of a use interval (the
365 // end of a lifetime hole). Use at this position should be attributed to
366 // the split child because split child owns use interval covering it.
367 while (use_after != nullptr &&
368 use_after->pos().Value() < position.Value()) {
369 use_before = use_after;
370 use_after = use_after->next();
373 while (use_after != nullptr &&
374 use_after->pos().Value() <= position.Value()) {
375 use_before = use_after;
376 use_after = use_after->next();
380 // Partition original use positions to the two live ranges.
381 if (use_before != nullptr) {
382 use_before->next_ = nullptr;
384 first_pos_ = nullptr;
386 result->first_pos_ = use_after;
388 // Discard cached iteration state. It might be pointing
389 // to the use that no longer belongs to this live range.
390 last_processed_use_ = nullptr;
391 current_interval_ = nullptr;
393 // Link the new live range in the chain before any of the other
394 // ranges linked from the range before the split.
395 result->parent_ = (parent_ == nullptr) ? this : parent_;
396 result->kind_ = result->parent_->kind_;
397 result->next_ = next_;
407 // This implements an ordering on live ranges so that they are ordered by their
408 // start positions. This is needed for the correctness of the register
409 // allocation algorithm. If two live ranges start at the same offset then there
410 // is a tie breaker based on where the value is first used. This part of the
411 // ordering is merely a heuristic.
412 bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
413 LifetimePosition start = Start();
414 LifetimePosition other_start = other->Start();
415 if (start.Value() == other_start.Value()) {
416 UsePosition* pos = first_pos();
// A range with no uses at all sorts after one that has a use.
417 if (pos == nullptr) return false;
418 UsePosition* other_pos = other->first_pos();
419 if (other_pos == nullptr) return true;
420 return pos->pos().Value() < other_pos->pos().Value();
422 return start.Value() < other_start.Value();
// Trims the front of the first use interval so the range begins at |start|.
// |start| must lie inside the first interval.
426 void LiveRange::ShortenTo(LifetimePosition start) {
427 TraceAlloc("Shorten live range %d to [%d\n", id_, start.Value());
428 DCHECK(first_interval_ != nullptr);
429 DCHECK(first_interval_->start().Value() <= start.Value());
430 DCHECK(start.Value() < first_interval_->end().Value());
431 first_interval_->set_start(start);
// Ensures the range covers [start, end): swallows any leading intervals that
// begin within the new interval (extending the end if one of them reaches
// further), then prepends a single interval covering the union.
// NOTE(review): listing is truncated — the Zone* parameter line and the
// `new_end` initialization are not visible here.
435 void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end,
437 TraceAlloc("Ensure live range %d in interval [%d %d[\n", id_, start.Value(),
440 while (first_interval_ != nullptr &&
441 first_interval_->start().Value() <= end.Value()) {
442 if (first_interval_->end().Value() > end.Value()) {
443 new_end = first_interval_->end();
445 first_interval_ = first_interval_->next();
448 auto new_interval = new (zone) UseInterval(start, new_end);
449 new_interval->next_ = first_interval_;
450 first_interval_ = new_interval;
451 if (new_interval->next() == nullptr) {
452 last_interval_ = new_interval;
// Prepends the interval [start, end) to the range. Intervals are added in
// reverse instruction order, so the new interval either precedes, abuts, or
// overlaps the current first interval; abutting intervals are coalesced and
// overlapping ones are merged in place.
457 void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end,
459 TraceAlloc("Add to live range %d interval [%d %d[\n", id_, start.Value(),
461 if (first_interval_ == nullptr) {
// First interval of the range.
462 auto interval = new (zone) UseInterval(start, end);
463 first_interval_ = interval;
464 last_interval_ = interval;
466 if (end.Value() == first_interval_->start().Value()) {
// New interval ends exactly where the old one starts: extend in place.
467 first_interval_->set_start(start);
468 } else if (end.Value() < first_interval_->start().Value()) {
// Strictly before: link a fresh interval at the head.
469 auto interval = new (zone) UseInterval(start, end);
470 interval->set_next(first_interval_);
471 first_interval_ = interval;
473 // Order of instruction's processing (see ProcessInstructions) guarantees
474 // that each new use interval either precedes or intersects with
475 // last added interval.
476 DCHECK(start.Value() < first_interval_->end().Value());
477 first_interval_->start_ = Min(start, first_interval_->start_);
478 first_interval_->end_ = Max(end, first_interval_->end_);
// Inserts a use position for |operand| (with optional |hint|) into the
// position list, keeping it sorted by position. Also tracks the nearest
// preceding hinted use so current_hint_operand_ reflects the first hint in
// the range.
484 void LiveRange::AddUsePosition(LifetimePosition pos,
485 InstructionOperand* operand,
486 InstructionOperand* hint, Zone* zone) {
487 TraceAlloc("Add to live range %d use position %d\n", id_, pos.Value());
488 auto use_pos = new (zone) UsePosition(pos, operand, hint);
489 UsePosition* prev_hint = nullptr;
490 UsePosition* prev = nullptr;
491 auto current = first_pos_;
// Walk to the insertion point, remembering the last hinted use seen.
492 while (current != nullptr && current->pos().Value() < pos.Value()) {
493 prev_hint = current->HasHint() ? current : prev_hint;
495 current = current->next();
498 if (prev == nullptr) {
// Insert at the head of the list.
499 use_pos->set_next(first_pos_);
500 first_pos_ = use_pos;
502 use_pos->next_ = prev->next_;
503 prev->next_ = use_pos;
// If this is now the earliest hinted use, it provides the range's hint.
506 if (prev_hint == nullptr && use_pos->HasHint()) {
507 current_hint_operand_ = hint;
// Rewrites every use position's operand in place to the assigned operand
// |op| (same kind and index). Uses that require a register must be getting
// a register operand.
512 void LiveRange::ConvertUsesToOperand(InstructionOperand* op) {
513 auto use_pos = first_pos();
514 while (use_pos != nullptr) {
515 DCHECK(Start().Value() <= use_pos->pos().Value() &&
516 use_pos->pos().Value() <= End().Value());
518 if (use_pos->HasOperand()) {
519 DCHECK(op->IsRegister() || op->IsDoubleRegister() ||
520 !use_pos->RequiresRegister());
521 use_pos->operand()->ConvertTo(op->kind(), op->index());
523 use_pos = use_pos->next();
// Fast check: |position| lies within the overall span [Start(), End()).
// Does not account for lifetime holes — see Covers() for the exact test.
528 bool LiveRange::CanCover(LifetimePosition position) const {
529 if (IsEmpty()) return false;
530 return Start().Value() <= position.Value() &&
531 position.Value() < End().Value();
// Exact coverage test: |position| must fall inside one of the range's use
// intervals (lifetime holes between intervals do not count). Advances the
// cached search marker as it scans.
535 bool LiveRange::Covers(LifetimePosition position) {
536 if (!CanCover(position)) return false;
537 auto start_search = FirstSearchIntervalForPosition(position);
538 for (auto interval = start_search; interval != nullptr;
539 interval = interval->next()) {
540 DCHECK(interval->next() == nullptr ||
541 interval->next()->start().Value() >= interval->start().Value());
542 AdvanceLastProcessedMarker(interval, position);
543 if (interval->Contains(position)) return true;
// Intervals are sorted; once past |position| it cannot be covered.
544 if (interval->start().Value() > position.Value()) return false;
// Returns the first position at which this range and |other| are both live,
// or an invalid position if they never intersect. Walks both sorted interval
// lists in tandem, advancing whichever interval starts earlier.
// NOTE(review): listing is truncated — the advance of `a`/`b` inside the
// else branch and loop-closing lines are not visible here.
550 LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
551 auto b = other->first_interval();
552 if (b == nullptr) return LifetimePosition::Invalid();
553 auto advance_last_processed_up_to = b->start();
554 auto a = FirstSearchIntervalForPosition(b->start());
555 while (a != nullptr && b != nullptr) {
556 if (a->start().Value() > other->End().Value()) break;
557 if (b->start().Value() > End().Value()) break;
558 auto cur_intersection = a->Intersect(b);
559 if (cur_intersection.IsValid()) {
560 return cur_intersection;
562 if (a->start().Value() < b->start().Value()) {
564 if (a == nullptr || a->start().Value() > other->End().Value()) break;
565 AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
570 return LifetimePosition::Invalid();
// Pre-builds one operand per general and per double register so that
// GetAssignedOperand can hand out shared operand objects instead of
// allocating a new one per use.
574 InstructionOperandCache::InstructionOperandCache() {
575 for (size_t i = 0; i < arraysize(general_register_operands_); ++i) {
576 general_register_operands_[i] =
577 i::compiler::RegisterOperand(static_cast<int>(i));
579 for (size_t i = 0; i < arraysize(double_register_operands_); ++i) {
580 double_register_operands_[i] =
581 i::compiler::DoubleRegisterOperand(static_cast<int>(i));
// Constructs the allocator: sizes the per-virtual-register and per-physical-
// register tables from the instruction sequence and register configuration,
// pre-reserves the worklists, and hands the assigned-register bitvectors to
// the frame.
// NOTE(review): listing is truncated — initializers for local_zone_, frame_,
// code_, config_ etc. are not visible here.
586 RegisterAllocator::RegisterAllocator(const RegisterConfiguration* config,
587 Zone* zone, Frame* frame,
588 InstructionSequence* code,
589 const char* debug_name)
593 debug_name_(debug_name),
595 operand_cache_(new (code_zone()) InstructionOperandCache()),
596 phi_map_(local_zone()),
597 live_in_sets_(code->InstructionBlockCount(), nullptr, local_zone()),
598 live_ranges_(code->VirtualRegisterCount() * 2, nullptr, local_zone()),
599 fixed_live_ranges_(this->config()->num_general_registers(), nullptr,
601 fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr,
603 unhandled_live_ranges_(local_zone()),
604 active_live_ranges_(local_zone()),
605 inactive_live_ranges_(local_zone()),
606 spill_ranges_(local_zone()),
607 mode_(UNALLOCATED_REGISTERS),
609 DCHECK(this->config()->num_general_registers() <=
610 RegisterConfiguration::kMaxGeneralRegisters);
611 DCHECK(this->config()->num_double_registers() <=
612 RegisterConfiguration::kMaxDoubleRegisters);
613 // TryAllocateFreeReg and AllocateBlockedReg assume this
614 // when allocating local arrays.
615 DCHECK(RegisterConfiguration::kMaxDoubleRegisters >=
616 this->config()->num_general_registers());
617 unhandled_live_ranges().reserve(
618 static_cast<size_t>(code->VirtualRegisterCount() * 2));
619 active_live_ranges().reserve(8);
620 inactive_live_ranges().reserve(8);
621 spill_ranges().reserve(8);
622 assigned_registers_ =
623 new (code_zone()) BitVector(config->num_general_registers(), code_zone());
624 assigned_double_registers_ = new (code_zone())
625 BitVector(config->num_aliased_double_registers(), code_zone());
626 frame->SetAllocatedRegisters(assigned_registers_);
627 frame->SetAllocatedDoubleRegisters(assigned_double_registers_);
// Computes the live-out set of |block| as the union of its successors'
// live-in sets (backward edges contribute nothing, since their live-in is
// not computed yet) plus the phi inputs flowing along each successor edge.
631 BitVector* RegisterAllocator::ComputeLiveOut(const InstructionBlock* block) {
632 // Compute live out for the given block, except not including backward
634 auto live_out = new (local_zone())
635 BitVector(code()->VirtualRegisterCount(), local_zone());
637 // Process all successor blocks.
638 for (auto succ : block->successors()) {
639 // Add values live on entry to the successor. Note the successor's
640 // live_in will not be computed yet for backwards edges.
641 auto live_in = live_in_sets_[succ.ToSize()];
642 if (live_in != nullptr) live_out->Union(*live_in);
644 // All phi input operands corresponding to this successor edge are live
645 // out from this block.
646 auto successor = code()->InstructionBlockAt(succ);
647 size_t index = successor->PredecessorIndexOf(block->rpo_number());
648 DCHECK(index < successor->PredecessorCount());
649 for (auto phi : successor->phis()) {
650 live_out->Add(phi->operands()[index]);
// For every value live out of |block|, adds a use interval spanning the
// whole block to its live range (later definitions in the block will shorten
// it via ShortenTo).
// NOTE(review): listing is truncated — the `auto start =` line and the
// iterator Advance() call are not visible here.
657 void RegisterAllocator::AddInitialIntervals(const InstructionBlock* block,
658 BitVector* live_out) {
659 // Add an interval that includes the entire block to the live range for
660 // each live_out value.
662 LifetimePosition::FromInstructionIndex(block->first_instruction_index());
663 auto end = LifetimePosition::FromInstructionIndex(
664 block->last_instruction_index()).NextInstruction();
665 BitVector::Iterator iterator(live_out);
666 while (!iterator.Done()) {
667 int operand_index = iterator.Current();
668 auto range = LiveRangeFor(operand_index);
669 range->AddUseInterval(start, end, local_zone());
// Maps a double-register index to a negative live-range id, in the id space
// after (below) the ids reserved for fixed general registers.
675 int RegisterAllocator::FixedDoubleLiveRangeID(int index) {
676 return -index - 1 - config()->num_general_registers();
// Converts an unallocated operand with a fixed policy into the concrete
// operand it demands (stack slot, general register, or double register).
// If |is_tagged|, the resulting location is recorded in the pointer map of
// the instruction at |pos| so the GC can find it.
// NOTE(review): listing is truncated — the UNREACHABLE default branch, the
// `if (is_tagged)` guard, and the return are not visible here.
680 InstructionOperand* RegisterAllocator::AllocateFixed(
681 UnallocatedOperand* operand, int pos, bool is_tagged) {
682 TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register());
683 DCHECK(operand->HasFixedPolicy());
684 if (operand->HasFixedSlotPolicy()) {
685 operand->ConvertTo(InstructionOperand::STACK_SLOT,
686 operand->fixed_slot_index());
687 } else if (operand->HasFixedRegisterPolicy()) {
688 int reg_index = operand->fixed_register_index();
689 operand->ConvertTo(InstructionOperand::REGISTER, reg_index);
690 } else if (operand->HasFixedDoubleRegisterPolicy()) {
691 int reg_index = operand->fixed_register_index();
692 operand->ConvertTo(InstructionOperand::DOUBLE_REGISTER, reg_index);
697 TraceAlloc("Fixed reg is tagged at %d\n", pos);
698 auto instr = InstructionAt(pos);
699 if (instr->HasPointerMap()) {
700 instr->pointer_map()->RecordPointer(operand, code_zone());
// Returns (creating on first use) the fixed live range representing general
// register |index|, pre-assigned to that register.
707 LiveRange* RegisterAllocator::FixedLiveRangeFor(int index) {
708 DCHECK(index < config()->num_general_registers());
709 auto result = fixed_live_ranges()[index];
710 if (result == nullptr) {
711 // TODO(titzer): add a utility method to allocate a new LiveRange:
712 // The LiveRange object itself can go in this zone, but the
713 // InstructionOperand needs
714 // to go in the code zone, since it may survive register allocation.
715 result = new (local_zone()) LiveRange(FixedLiveRangeID(index), code_zone());
716 DCHECK(result->IsFixed());
717 result->kind_ = GENERAL_REGISTERS;
718 SetLiveRangeAssignedRegister(result, index);
719 fixed_live_ranges()[index] = result;
// Double-register counterpart of FixedLiveRangeFor: returns (creating on
// first use) the fixed live range for double register |index|.
725 LiveRange* RegisterAllocator::FixedDoubleLiveRangeFor(int index) {
726 DCHECK(index < config()->num_aliased_double_registers());
727 auto result = fixed_double_live_ranges()[index];
728 if (result == nullptr) {
729 result = new (local_zone())
730 LiveRange(FixedDoubleLiveRangeID(index), code_zone());
731 DCHECK(result->IsFixed());
732 result->kind_ = DOUBLE_REGISTERS;
733 SetLiveRangeAssignedRegister(result, index);
734 fixed_double_live_ranges()[index] = result;
// Returns the live range for virtual register |index|, growing the table
// and lazily creating the range as needed.
740 LiveRange* RegisterAllocator::LiveRangeFor(int index) {
741 if (index >= static_cast<int>(live_ranges().size())) {
742 live_ranges().resize(index + 1, nullptr);
744 auto result = live_ranges()[index];
745 if (result == nullptr) {
746 result = new (local_zone()) LiveRange(index, code_zone());
747 live_ranges()[index] = result;
// Returns the gap instruction immediately before the last instruction of
// |block|.
753 GapInstruction* RegisterAllocator::GetLastGap(const InstructionBlock* block) {
754 int last_instruction = block->last_instruction_index();
755 return code()->GapAt(last_instruction - 1);
// Dispatches to the correct live-range lookup for |operand|: by virtual
// register for unallocated operands, or the fixed range for a physical
// general/double register. (Other operand kinds are handled on lines not
// visible in this truncated listing.)
759 LiveRange* RegisterAllocator::LiveRangeFor(InstructionOperand* operand) {
760 if (operand->IsUnallocated()) {
761 return LiveRangeFor(UnallocatedOperand::cast(operand)->virtual_register());
762 } else if (operand->IsRegister()) {
763 return FixedLiveRangeFor(operand->index());
764 } else if (operand->IsDoubleRegister()) {
765 return FixedDoubleLiveRangeFor(operand->index());
// Records a definition of |operand| at |position|: either shortens the
// existing live range to begin at the definition, or — for a definition
// without any use — creates a minimal one-instruction interval. Unallocated
// operands also get a use position carrying the |hint|.
772 void RegisterAllocator::Define(LifetimePosition position,
773 InstructionOperand* operand,
774 InstructionOperand* hint) {
775 auto range = LiveRangeFor(operand);
776 if (range == nullptr) return;
778 if (range->IsEmpty() || range->Start().Value() > position.Value()) {
779 // Can happen if there is a definition without use.
780 range->AddUseInterval(position, position.NextInstruction(), local_zone());
781 range->AddUsePosition(position.NextInstruction(), nullptr, nullptr,
784 range->ShortenTo(position);
787 if (operand->IsUnallocated()) {
788 auto unalloc_operand = UnallocatedOperand::cast(operand);
789 range->AddUsePosition(position, unalloc_operand, hint, local_zone());
// Records a use of |operand| at |position|: extends the live range back to
// |block_start| and, for unallocated operands, adds a hinted use position.
794 void RegisterAllocator::Use(LifetimePosition block_start,
795 LifetimePosition position,
796 InstructionOperand* operand,
797 InstructionOperand* hint) {
798 auto range = LiveRangeFor(operand);
799 if (range == nullptr) return;
800 if (operand->IsUnallocated()) {
801 UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
802 range->AddUsePosition(position, unalloc_operand, hint, local_zone());
804 range->AddUseInterval(block_start, position, local_zone());
// Adds a move from |from| to |to| in the parallel move of the gap at
// |index|, at the given inner position (e.g. START).
808 void RegisterAllocator::AddGapMove(int index,
809 GapInstruction::InnerPosition position,
810 InstructionOperand* from,
811 InstructionOperand* to) {
812 auto gap = code()->GapAt(index);
813 auto move = gap->GetOrCreateParallelMove(position, code_zone());
814 move->AddMove(from, to, code_zone());
// Returns whether two sorted use-interval lists have any point in common,
// by walking both lists in tandem and advancing whichever starts earlier.
// (The `return true`/`return false` lines are not visible in this truncated
// listing.)
818 static bool AreUseIntervalsIntersecting(UseInterval* interval1,
819 UseInterval* interval2) {
820 while (interval1 != nullptr && interval2 != nullptr) {
821 if (interval1->start().Value() < interval2->start().Value()) {
822 if (interval1->end().Value() > interval2->start().Value()) {
825 interval1 = interval1->next();
827 if (interval2->end().Value() > interval1->start().Value()) {
830 interval2 = interval2->next();
// Builds a spill range from |range| by deep-copying its use-interval list
// into |zone|, then registering |range| as the first member and pointing it
// at this spill range.
// NOTE(review): listing is truncated — the `result = new_node` assignment on
// the first iteration and the `node = new_node; src = src->next()` advance
// are not visible here.
837 SpillRange::SpillRange(LiveRange* range, Zone* zone) : live_ranges_(zone) {
838 auto src = range->first_interval();
839 UseInterval* result = nullptr;
840 UseInterval* node = nullptr;
842 while (src != nullptr) {
843 auto new_node = new (zone) UseInterval(src->start(), src->end());
844 if (result == nullptr) {
847 node->set_next(new_node);
852 use_interval_ = result;
853 live_ranges().push_back(range);
854 end_position_ = node->end();
855 DCHECK(!range->HasSpillRange());
856 range->SetSpillRange(this);
// Quick rejection (either list empty, or the spans [start, End()) don't
// overlap at all) before the precise per-interval intersection test.
860 bool SpillRange::IsIntersectingWith(SpillRange* other) const {
861 if (this->use_interval_ == nullptr || other->use_interval_ == nullptr ||
862 this->End().Value() <= other->use_interval_->start().Value() ||
863 other->End().Value() <= this->use_interval_->start().Value()) {
866 return AreUseIntervalsIntersecting(use_interval_, other->use_interval_);
// Attempts to fold |other| into this spill range so both can share one stack
// slot. Fails if the register kinds differ or the ranges intersect. On
// success |other| is emptied: its intervals are merged in, its live ranges
// are re-pointed at this spill range, and its end is pushed to MaxPosition
// so it is treated as dead.
870 bool SpillRange::TryMerge(SpillRange* other) {
871 if (Kind() != other->Kind() || IsIntersectingWith(other)) return false;
873 auto max = LifetimePosition::MaxPosition();
874 if (End().Value() < other->End().Value() &&
875 other->End().Value() != max.Value()) {
876 end_position_ = other->End();
878 other->end_position_ = max;
880 MergeDisjointIntervals(other->use_interval_);
881 other->use_interval_ = nullptr;
883 for (auto range : other->live_ranges()) {
884 DCHECK(range->GetSpillRange() == other);
885 range->SetSpillRange(this);
888 live_ranges().insert(live_ranges().end(), other->live_ranges().begin(),
889 other->live_ranges().end());
890 other->live_ranges().clear();
// Commits the allocated stack operand |op| to every live range that was
// merged into this spill range.
896 void SpillRange::SetOperand(InstructionOperand* op) {
897 for (auto range : live_ranges()) {
898 DCHECK(range->GetSpillRange() == this);
899 range->CommitSpillOperand(op);
// Merges the sorted, pairwise-disjoint interval list |other| into this spill
// range's list, producing one sorted list. Classic two-list merge: always
// append whichever list currently starts earlier (swapping the roles of the
// two lists when needed).
904 void SpillRange::MergeDisjointIntervals(UseInterval* other) {
905 UseInterval* tail = nullptr;
906 auto current = use_interval_;
907 while (other != nullptr) {
908 // Make sure the 'current' list starts first
909 if (current == nullptr ||
910 current->start().Value() > other->start().Value()) {
911 std::swap(current, other);
913 // Check disjointness
914 DCHECK(other == nullptr ||
915 current->end().Value() <= other->start().Value());
916 // Append the 'current' node to the result accumulator and move forward
917 if (tail == nullptr) {
918 use_interval_ = current;
920 tail->set_next(current);
923 current = current->next();
925 // Other list is empty => we are done
// Two passes over the collected spill ranges: first greedily merge each pair
// of non-intersecting ranges (O(n^2) pairwise TryMerge; merged ranges become
// empty), then allocate one frame slot per surviving range and commit the
// resulting stack operand to all of its member live ranges.
929 void RegisterAllocator::AssignSpillSlots() {
930 // Merge disjoint spill ranges
931 for (size_t i = 0; i < spill_ranges().size(); i++) {
932 auto range = spill_ranges()[i];
933 if (range->IsEmpty()) continue;
934 for (size_t j = i + 1; j < spill_ranges().size(); j++) {
935 auto other = spill_ranges()[j];
936 if (!other->IsEmpty()) {
937 range->TryMerge(other);
942 // Allocate slots for the merged spill ranges.
943 for (auto range : spill_ranges()) {
944 if (range->IsEmpty()) continue;
945 // Allocate a new operand referring to the spill slot.
946 auto kind = range->Kind();
947 int index = frame()->AllocateSpillSlot(kind == DOUBLE_REGISTERS);
948 auto op_kind = kind == DOUBLE_REGISTERS
949 ? InstructionOperand::DOUBLE_STACK_SLOT
950 : InstructionOperand::STACK_SLOT;
951 auto op = InstructionOperand::New(code_zone(), op_kind, index);
952 range->SetOperand(op);
// Final pass: rewrite the uses of every range that did not get a register
// (register assignments were already committed in set_assigned_register) to
// its assigned spill operand, and emit any deferred spills-at-definition.
957 void RegisterAllocator::CommitAssignment() {
958 for (auto range : live_ranges()) {
959 if (range == nullptr || range->IsEmpty()) continue;
960 // Register assignments were committed in set_assigned_register.
961 if (range->HasRegisterAssigned()) continue;
962 auto assigned = range->GetAssignedOperand(operand_cache());
963 range->ConvertUsesToOperand(assigned);
964 if (range->IsSpilled()) {
965 range->CommitSpillsAtDefinition(code(), assigned);
// Creates a new SpillRange for |range|, registers it for later slot
// assignment (AssignSpillSlots), and returns it.
971 SpillRange* RegisterAllocator::AssignSpillRangeToLiveRange(LiveRange* range) {
972 auto spill_range = new (local_zone()) SpillRange(range, local_zone());
973 spill_ranges().push_back(spill_range);
// Heuristic for top-level phi ranges: if more than half of the phi's inputs
// are already spilled, try to merge their spill ranges together and spill
// the phi into the same merged slot — either entirely (no register-
// beneficial use ahead) or just until its first beneficial use
// (SpillBetween). Returns whether the phi's spill decision was made here.
// NOTE(review): listing is truncated — the `return false` lines of the early
// bail-outs, the `spilled_count++`/`num_merged++` increments, and the
// AddToUnhandledSorted/return tail are not visible here.
978 bool RegisterAllocator::TryReuseSpillForPhi(LiveRange* range) {
979 if (range->IsChild() || !range->is_phi()) return false;
980 DCHECK(range->HasNoSpillType());
982 auto lookup = phi_map_.find(range->id());
983 DCHECK(lookup != phi_map_.end());
984 auto phi = lookup->second.phi;
985 auto block = lookup->second.block;
986 // Count the number of spilled operands.
987 size_t spilled_count = 0;
988 LiveRange* first_op = nullptr;
989 for (size_t i = 0; i < phi->operands().size(); i++) {
990 int op = phi->operands()[i];
991 LiveRange* op_range = LiveRangeFor(op);
992 if (op_range->GetSpillRange() == nullptr) continue;
993 auto pred = code()->InstructionBlockAt(block->predecessors()[i]);
995 LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
// Walk down to the child range live at the end of the predecessor block.
996 while (op_range != nullptr && !op_range->CanCover(pred_end)) {
997 op_range = op_range->next();
999 if (op_range != nullptr && op_range->IsSpilled()) {
1001 if (first_op == nullptr) {
1002 first_op = op_range->TopLevel();
1007 // Only continue if more than half of the operands are spilled.
1008 if (spilled_count * 2 <= phi->operands().size()) {
1012 // Try to merge the spilled operands and count the number of merged spilled
1014 DCHECK(first_op != nullptr);
1015 auto first_op_spill = first_op->GetSpillRange();
1016 size_t num_merged = 1;
1017 for (size_t i = 1; i < phi->operands().size(); i++) {
1018 int op = phi->operands()[i];
1019 auto op_range = LiveRangeFor(op);
1020 auto op_spill = op_range->GetSpillRange();
1021 if (op_spill != nullptr &&
1022 (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill))) {
1027 // Only continue if enough operands could be merged to the
1029 if (num_merged * 2 <= phi->operands().size() ||
1030 AreUseIntervalsIntersecting(first_op_spill->interval(),
1031 range->first_interval())) {
1035 // If the range does not need register soon, spill it to the merged
1037 auto next_pos = range->Start();
1038 if (code()->IsGapAt(next_pos.InstructionIndex())) {
1039 next_pos = next_pos.NextInstruction();
1041 auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
1042 if (pos == nullptr) {
// No beneficial use at all: spill the whole phi range into the merged slot.
1043 auto spill_range = AssignSpillRangeToLiveRange(range->TopLevel());
1044 CHECK(first_op_spill->TryMerge(spill_range));
1047 } else if (pos->pos().Value() > range->Start().NextInstruction().Value()) {
// Beneficial use is not immediate: spill only up to that use.
1048 auto spill_range = AssignSpillRangeToLiveRange(range->TopLevel());
1049 CHECK(first_op_spill->TryMerge(spill_range));
1050 SpillBetween(range, range->Start(), pos->pos());
1051 DCHECK(UnhandledIsSorted());
// Walks the block's instructions and, at every gap, resolves the fixed-
// operand constraints between the surrounding instructions. A block whose
// last instruction is not a gap gets special end-of-block handling.
1058 void RegisterAllocator::MeetRegisterConstraints(const InstructionBlock* block) {
1059 int start = block->first_instruction_index();
1060 int end = block->last_instruction_index();
1061 DCHECK_NE(-1, start);
1062 for (int i = start; i <= end; ++i) {
1063 if (code()->IsGapAt(i)) {
1064 Instruction* instr = nullptr;
1065 Instruction* prev_instr = nullptr;
// The gap sits between prev_instr (i-1) and instr (i+1); either may be
// absent at the block boundaries.
1066 if (i < end) instr = InstructionAt(i + 1);
1067 if (i > start) prev_instr = InstructionAt(i - 1);
1068 MeetConstraintsBetween(prev_instr, instr, i);
1072 // Meet register constraints for the instruction in the end.
1073 if (!code()->IsGapAt(end)) {
1074 MeetRegisterConstraintsForLastInstructionInBlock(block);
// Handles fixed outputs of a block-ending instruction (e.g. a call) that is
// not followed by a gap in its own block: the fixed output is either pinned
// to a stack slot (then used as the spill operand directly), or copied via a
// gap move at the start of each successor — each successor must have exactly
// one predecessor so the move is unconditional. Unassigned outputs are
// spilled at the successor's entry instead.
// NOTE(review): listing is truncated — the `assigned = true` path, the
// `if (!assigned)` guard before the second successor loop, and closing
// braces are not visible here.
1079 void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
1080 const InstructionBlock* block) {
1081 int end = block->last_instruction_index();
1082 auto last_instruction = InstructionAt(end);
1083 for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
1084 auto output_operand = last_instruction->OutputAt(i);
1085 DCHECK(!output_operand->IsConstant());
1086 auto output = UnallocatedOperand::cast(output_operand);
1087 int output_vreg = output->virtual_register();
1088 auto range = LiveRangeFor(output_vreg);
1089 bool assigned = false;
1090 if (output->HasFixedPolicy()) {
1091 AllocateFixed(output, -1, false);
1092 // This value is produced on the stack, we never need to spill it.
1093 if (output->IsStackSlot()) {
1094 DCHECK(output->index() < frame_->GetSpillSlotCount());
1095 range->SetSpillOperand(output);
1096 range->SetSpillStartIndex(end);
1100 for (auto succ : block->successors()) {
1101 const InstructionBlock* successor = code()->InstructionBlockAt(succ);
1102 DCHECK(successor->PredecessorCount() == 1);
1103 int gap_index = successor->first_instruction_index();
1104 DCHECK(code()->IsGapAt(gap_index));
1106 // Create an unconstrained operand for the same virtual register
1107 // and insert a gap move from the fixed output to the operand.
1108 UnallocatedOperand* output_copy =
1109 UnallocatedOperand(UnallocatedOperand::ANY, output_vreg)
1111 AddGapMove(gap_index, GapInstruction::START, output, output_copy);
1116 for (auto succ : block->successors()) {
1117 const InstructionBlock* successor = code()->InstructionBlockAt(succ);
1118 DCHECK(successor->PredecessorCount() == 1);
1119 int gap_index = successor->first_instruction_index();
1120 range->SpillAtDefinition(local_zone(), gap_index, output);
1121 range->SetSpillStartIndex(gap_index);
// Applies the register constraints of the two instructions adjacent to the
// gap at |gap_index|: fixed temps and constant/fixed outputs of |first|, and
// fixed inputs plus "same as input" outputs of |second|. Either instruction
// may be null at block boundaries.
void RegisterAllocator::MeetConstraintsBetween(Instruction* first,
                                               Instruction* second,
  if (first != nullptr) {
    // Handle fixed temporaries.
    for (size_t i = 0; i < first->TempCount(); i++) {
      auto temp = UnallocatedOperand::cast(first->TempAt(i));
      if (temp->HasFixedPolicy()) {
        // gap_index - 1 is the index of |first| itself.
        AllocateFixed(temp, gap_index - 1, false);
    // Handle constant/fixed output operands.
    for (size_t i = 0; i < first->OutputCount(); i++) {
      InstructionOperand* output = first->OutputAt(i);
      if (output->IsConstant()) {
        // Constant outputs are rematerializable: spill "slot" is the constant.
        int output_vreg = output->index();
        auto range = LiveRangeFor(output_vreg);
        range->SetSpillStartIndex(gap_index - 1);
        range->SetSpillOperand(output);
        auto first_output = UnallocatedOperand::cast(output);
        auto range = LiveRangeFor(first_output->virtual_register());
        bool assigned = false;
        if (first_output->HasFixedPolicy()) {
          // Keep an unconstrained copy of the operand, pin the original, and
          // move the fixed location into the copy in the following gap.
          auto output_copy = first_output->CopyUnconstrained(code_zone());
          bool is_tagged = HasTaggedValue(first_output->virtual_register());
          AllocateFixed(first_output, gap_index, is_tagged);
          // This value is produced on the stack, we never need to spill it.
          if (first_output->IsStackSlot()) {
            DCHECK(first_output->index() < frame_->GetSpillSlotCount());
            range->SetSpillOperand(first_output);
            range->SetSpillStartIndex(gap_index - 1);
          AddGapMove(gap_index, GapInstruction::START, first_output,
        // Make sure we add a gap move for spilling (if we have not done
        range->SpillAtDefinition(local_zone(), gap_index, first_output);
        range->SetSpillStartIndex(gap_index);
  if (second != nullptr) {
    // Handle fixed input operands of second instruction.
    for (size_t i = 0; i < second->InputCount(); i++) {
      auto input = second->InputAt(i);
      if (input->IsImmediate()) continue;  // Ignore immediates.
      auto cur_input = UnallocatedOperand::cast(input);
      if (cur_input->HasFixedPolicy()) {
        // Move the value into its fixed location in the preceding gap.
        auto input_copy = cur_input->CopyUnconstrained(code_zone());
        bool is_tagged = HasTaggedValue(cur_input->virtual_register());
        AllocateFixed(cur_input, gap_index + 1, is_tagged);
        AddGapMove(gap_index, GapInstruction::END, input_copy, cur_input);
    // Handle "output same as input" for second instruction.
    for (size_t i = 0; i < second->OutputCount(); i++) {
      auto output = second->OutputAt(i);
      if (!output->IsUnallocated()) continue;
      auto second_output = UnallocatedOperand::cast(output);
      if (second_output->HasSameAsInputPolicy()) {
        DCHECK(i == 0);  // Only valid for first output.
        UnallocatedOperand* cur_input =
            UnallocatedOperand::cast(second->InputAt(0));
        int output_vreg = second_output->virtual_register();
        int input_vreg = cur_input->virtual_register();
        // Retarget the input to the output's vreg and copy the original input
        // value into it in the preceding gap.
        auto input_copy = cur_input->CopyUnconstrained(code_zone());
        cur_input->set_virtual_register(second_output->virtual_register());
        AddGapMove(gap_index, GapInstruction::END, input_copy, cur_input);
        if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
          // The tagged input must stay visible to the GC across this
          // instruction, so record the surviving copy in the pointer map.
          int index = gap_index + 1;
          Instruction* instr = InstructionAt(index);
          if (instr->HasPointerMap()) {
            instr->pointer_map()->RecordPointer(input_copy, code_zone());
        } else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
          // The input is assumed to immediately have a tagged representation,
          // before the pointer map can be used. I.e. the pointer map at the
          // instruction will include the output operand (whose value at the
          // beginning of the instruction is equal to the input operand). If
          // this is not desired, then the pointer map at this instruction needs
          // to be adjusted manually.
1228 bool RegisterAllocator::IsOutputRegisterOf(Instruction* instr, int index) {
1229 for (size_t i = 0; i < instr->OutputCount(); i++) {
1230 auto output = instr->OutputAt(i);
1231 if (output->IsRegister() && output->index() == index) return true;
1237 bool RegisterAllocator::IsOutputDoubleRegisterOf(Instruction* instr,
1239 for (size_t i = 0; i < instr->OutputCount(); i++) {
1240 auto output = instr->OutputAt(i);
1241 if (output->IsDoubleRegister() && output->index() == index) return true;
// Walks the instructions of |block| in reverse order, building live ranges:
// definitions kill values (removing them from |live|, the live-in set under
// construction) and uses make values live back to the block start. Gap moves,
// clobbered registers, inputs, and temps are all accounted for.
void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
  int block_start = block->first_instruction_index();
  auto block_start_position =
      LifetimePosition::FromInstructionIndex(block_start);
  for (int index = block->last_instruction_index(); index >= block_start;
    auto curr_position = LifetimePosition::FromInstructionIndex(index);
    auto instr = InstructionAt(index);
    DCHECK(instr != nullptr);
    if (instr->IsGapMoves()) {
      // Process the moves of the gap instruction, making their sources live.
      auto gap = code()->GapAt(index);
      // END is visited before START since we iterate backwards.
      const GapInstruction::InnerPosition kPositions[] = {
          GapInstruction::END, GapInstruction::START};
      for (auto position : kPositions) {
        auto move = gap->GetParallelMove(position);
        if (move == nullptr) continue;
        if (position == GapInstruction::END) {
          curr_position = curr_position.InstructionEnd();
          curr_position = curr_position.InstructionStart();
        auto move_ops = move->move_operands();
        for (auto cur = move_ops->begin(); cur != move_ops->end(); ++cur) {
          auto from = cur->source();
          auto to = cur->destination();
          if (to->IsUnallocated()) {
            int to_vreg = UnallocatedOperand::cast(to)->virtual_register();
            auto to_range = LiveRangeFor(to_vreg);
            if (to_range->is_phi()) {
              DCHECK(!FLAG_turbo_delay_ssa_decon);
              if (to_range->is_non_loop_phi()) {
                // Use the phi's hint to bias allocation of the source.
                hint = to_range->current_hint_operand();
              if (live->Contains(to_vreg)) {
                // The destination is live below: this move defines it.
                Define(curr_position, to, from);
                live->Remove(to_vreg);
          Define(curr_position, to, from);
      // The move's source is used here and becomes live above.
      Use(block_start_position, curr_position, from, hint);
      if (from->IsUnallocated()) {
        live->Add(UnallocatedOperand::cast(from)->virtual_register());
    // Process output, inputs, and temps of this non-gap instruction.
    for (size_t i = 0; i < instr->OutputCount(); i++) {
      auto output = instr->OutputAt(i);
      if (output->IsUnallocated()) {
        int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
        live->Remove(out_vreg);
      } else if (output->IsConstant()) {
        int out_vreg = output->index();
        live->Remove(out_vreg);
      Define(curr_position, output, nullptr);
    if (instr->ClobbersRegisters()) {
      // Block every general register not written by this instruction for its
      // duration, via a use interval on the corresponding fixed range.
      for (int i = 0; i < config()->num_general_registers(); ++i) {
        if (!IsOutputRegisterOf(instr, i)) {
          auto range = FixedLiveRangeFor(i);
          range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
    if (instr->ClobbersDoubleRegisters()) {
      // Same for the aliased double registers.
      for (int i = 0; i < config()->num_aliased_double_registers(); ++i) {
        if (!IsOutputDoubleRegisterOf(instr, i)) {
          auto range = FixedDoubleLiveRangeFor(i);
          range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
    for (size_t i = 0; i < instr->InputCount(); i++) {
      auto input = instr->InputAt(i);
      if (input->IsImmediate()) continue;  // Ignore immediates.
      LifetimePosition use_pos;
      // Inputs used at start may share a register with the outputs; others
      // must stay live through the instruction's end.
      if (input->IsUnallocated() &&
          UnallocatedOperand::cast(input)->IsUsedAtStart()) {
        use_pos = curr_position;
        use_pos = curr_position.InstructionEnd();
      Use(block_start_position, use_pos, input, nullptr);
      if (input->IsUnallocated()) {
        live->Add(UnallocatedOperand::cast(input)->virtual_register());
    for (size_t i = 0; i < instr->TempCount(); i++) {
      auto temp = instr->TempAt(i);
      if (instr->ClobbersTemps()) {
        // Register temps are already covered by the clobber handling above.
        if (temp->IsRegister()) continue;
        if (temp->IsUnallocated()) {
          UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp);
          if (temp_unalloc->HasFixedPolicy()) {
      // A temp is both used and defined at this instruction.
      Use(block_start_position, curr_position.InstructionEnd(), temp,
      Define(curr_position, temp, nullptr);
// Records each phi of |block| in phi_map_, inserts the per-predecessor gap
// moves implementing the phi (unless SSA deconstruction is delayed), and
// marks the phi's live range as spilled at its definition.
void RegisterAllocator::ResolvePhis(const InstructionBlock* block) {
  for (auto phi : block->phis()) {
    int phi_vreg = phi->virtual_register();
    phi_map_.insert(std::make_pair(phi_vreg, PhiMapValue(phi, block)));
    auto& output = phi->output();
    if (!FLAG_turbo_delay_ssa_decon) {
      // Eager SSA deconstruction: move input_i -> output in the END slot of
      // the gap terminating each predecessor block.
      for (size_t i = 0; i < phi->operands().size(); ++i) {
        InstructionBlock* cur_block =
            code()->InstructionBlockAt(block->predecessors()[i]);
        AddGapMove(cur_block->last_instruction_index() - 1, GapInstruction::END,
                   &phi->inputs()[i], &output);
        DCHECK(!InstructionAt(cur_block->last_instruction_index())
    auto live_range = LiveRangeFor(phi_vreg);
    int gap_index = block->first_instruction_index();
    live_range->SpillAtDefinition(local_zone(), gap_index, &output);
    live_range->SetSpillStartIndex(gap_index);
    // We use the phi-ness of some nodes in some later heuristics.
    live_range->set_is_phi(true);
    live_range->set_is_non_loop_phi(!block->IsLoopHeader());
1402 void RegisterAllocator::MeetRegisterConstraints() {
1403 for (auto block : code()->instruction_blocks()) {
1404 MeetRegisterConstraints(block);
1409 void RegisterAllocator::ResolvePhis() {
1410 // Process the blocks in reverse order.
1411 for (auto i = code()->instruction_blocks().rbegin();
1412 i != code()->instruction_blocks().rend(); ++i) {
1418 ParallelMove* RegisterAllocator::GetConnectingParallelMove(
1419 LifetimePosition pos) {
1420 int index = pos.InstructionIndex();
1421 if (code()->IsGapAt(index)) {
1422 auto gap = code()->GapAt(index);
1423 return gap->GetOrCreateParallelMove(
1424 pos.IsInstructionStart() ? GapInstruction::START : GapInstruction::END,
1427 int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
1428 return code()->GapAt(gap_pos)->GetOrCreateParallelMove(
1429 (gap_pos < index) ? GapInstruction::AFTER : GapInstruction::START,
1434 const InstructionBlock* RegisterAllocator::GetInstructionBlock(
1435 LifetimePosition pos) {
1436 return code()->GetInstructionBlock(pos.InstructionIndex());
// For each live range that was split into several parts, inserts gap moves
// connecting consecutive parts when they touch within straight-line code
// (block-boundary connections are handled by control-flow resolution).
void RegisterAllocator::ConnectRanges() {
  for (auto first_range : live_ranges()) {
    if (first_range == nullptr || first_range->IsChild()) continue;
    auto second_range = first_range->next();
    while (second_range != nullptr) {
      auto pos = second_range->Start();
      // A spilled child needs no connecting move; its value is on the stack.
      if (!second_range->IsSpilled()) {
        // Add gap move if the two live ranges touch and there is no block
        if (first_range->End().Value() == pos.Value()) {
          bool should_insert = true;
          if (IsBlockBoundary(pos)) {
              CanEagerlyResolveControlFlow(GetInstructionBlock(pos));
          if (should_insert) {
            auto move = GetConnectingParallelMove(pos);
                first_range->GetAssignedOperand(operand_cache());
                second_range->GetAssignedOperand(operand_cache());
            move->AddMove(prev_operand, cur_operand, code_zone());
      first_range = second_range;
      second_range = second_range->next();
1472 bool RegisterAllocator::CanEagerlyResolveControlFlow(
1473 const InstructionBlock* block) const {
1474 if (block->PredecessorCount() != 1) return false;
1475 return block->predecessors()[0].IsNext(block->rpo_number());
// Immutable (start, end) bounds of one piece of a live range, cached so that
// control-flow resolution can binary-search over a range's pieces.
class LiveRangeBound {
  explicit LiveRangeBound(const LiveRange* range)
      : range_(range), start_(range->Start()), end_(range->End()) {
    DCHECK(!range->IsEmpty());
  // True iff |position| lies in the half-open interval [start_, end_).
  bool CanCover(LifetimePosition position) {
    return start_.Value() <= position.Value() &&
           position.Value() < end_.Value();
  const LiveRange* const range_;
  const LifetimePosition start_;
  const LifetimePosition end_;
  DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
  // NOTE(review): members of the FindResult struct filled in by
  // LiveRangeBoundArray::Find — presumably the live-range piece covering the
  // start of the current block and the one covering the end of its
  // predecessor; confirm against the struct declaration.
  const LiveRange* cur_cover_;
  const LiveRange* pred_cover_;
// A lazily-built, binary-searchable array of LiveRangeBound entries, one per
// piece of a (possibly split) live range, ordered by start position.
class LiveRangeBoundArray {
  LiveRangeBoundArray() : length_(0), start_(nullptr) {}

  // Initialization is deferred until the array is first needed.
  bool ShouldInitialize() { return start_ == nullptr; }

  // Counts the pieces of |range|, allocates the array, and placement-news a
  // LiveRangeBound for each piece.
  void Initialize(Zone* zone, const LiveRange* const range) {
    for (auto i = range; i != nullptr; i = i->next()) length++;
    start_ = zone->NewArray<LiveRangeBound>(length);
    for (auto i = range; i != nullptr; i = i->next(), ++curr) {
      new (curr) LiveRangeBound(i);

  // Binary search for the piece whose [start, end) interval covers
  // |position|. The caller must guarantee such a piece exists.
  LiveRangeBound* Find(const LifetimePosition position) const {
    size_t left_index = 0;
    size_t right_index = length_;
      size_t current_index = left_index + (right_index - left_index) / 2;
      DCHECK(right_index > current_index);
      auto bound = &start_[current_index];
      if (bound->start_.Value() <= position.Value()) {
        if (position.Value() < bound->end_.Value()) return bound;
        DCHECK(left_index < current_index);
        left_index = current_index;
        right_index = current_index;

  // Finds the piece live at the end of predecessor block |pred|.
  LiveRangeBound* FindPred(const InstructionBlock* pred) {
        LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
    return Find(pred_end);

  // Finds the piece live at the start of successor block |succ|.
  LiveRangeBound* FindSucc(const InstructionBlock* succ) {
        LifetimePosition::FromInstructionIndex(succ->first_instruction_index());
    return Find(succ_start);

  // Fills |result| with the pieces covering the end of |pred| and the start
  // of |block|; reuses the predecessor's bound when it covers both.
  void Find(const InstructionBlock* block, const InstructionBlock* pred,
            FindResult* result) const {
        LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
    auto bound = Find(pred_end);
    result->pred_cover_ = bound->range_;
    auto cur_start = LifetimePosition::FromInstructionIndex(
        block->first_instruction_index());
    if (bound->CanCover(cur_start)) {
      result->cur_cover_ = bound->range_;
      result->cur_cover_ = Find(cur_start)->range_;
      DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);

  LiveRangeBound* start_;

  DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
// Owns one LiveRangeBoundArray per live range, built lazily on first access,
// for fast piece lookup during control-flow resolution.
class LiveRangeFinder {
  explicit LiveRangeFinder(const RegisterAllocator& allocator)
      : allocator_(allocator),
        bounds_length_(static_cast<int>(allocator.live_ranges().size())),
        bounds_(allocator.local_zone()->NewArray<LiveRangeBoundArray>(
    // Default-construct each slot; real initialization happens in ArrayFor.
    for (int i = 0; i < bounds_length_; ++i) {
      new (&bounds_[i]) LiveRangeBoundArray();

  // Returns the (lazily initialized) bound array for the live range with the
  // given index, which must exist and be non-empty.
  LiveRangeBoundArray* ArrayFor(int operand_index) {
    DCHECK(operand_index < bounds_length_);
    auto range = allocator_.live_ranges()[operand_index];
    DCHECK(range != nullptr && !range->IsEmpty());
    auto array = &bounds_[operand_index];
    if (array->ShouldInitialize()) {
      array->Initialize(allocator_.local_zone(), range);

  const RegisterAllocator& allocator_;
  const int bounds_length_;
  LiveRangeBoundArray* const bounds_;

  DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
// Inserts the gap moves that reconcile live-range assignments across
// non-fall-through control-flow edges; with delayed SSA deconstruction it
// also rewrites each phi's operands to the locations assigned to them.
void RegisterAllocator::ResolveControlFlow() {
  // Lazily linearize live ranges in memory for fast lookup.
  LiveRangeFinder finder(*this);
  for (auto block : code()->instruction_blocks()) {
    if (CanEagerlyResolveControlFlow(block)) continue;
    if (FLAG_turbo_delay_ssa_decon) {
      for (auto phi : block->phis()) {
            finder.ArrayFor(phi->virtual_register())->FindSucc(block);
            block_bound->range_->GetAssignedOperand(operand_cache());
        // Rewrite the phi output to its assigned location.
        phi->output().ConvertTo(phi_output->kind(), phi_output->index());
        size_t pred_index = 0;
        for (auto pred : block->predecessors()) {
          const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
          auto* pred_bound = finder.ArrayFor(phi->operands()[pred_index])
                                 ->FindPred(pred_block);
              pred_bound->range_->GetAssignedOperand(operand_cache());
          phi->inputs()[pred_index] = *pred_op;
          ResolveControlFlow(block, phi_output, pred_block, pred_op);
    // Connect every value live into this block from each predecessor.
    auto live = live_in_sets_[block->rpo_number().ToInt()];
    BitVector::Iterator iterator(live);
    while (!iterator.Done()) {
      auto* array = finder.ArrayFor(iterator.Current());
      for (auto pred : block->predecessors()) {
        const auto* pred_block = code()->InstructionBlockAt(pred);
        array->Find(block, pred_block, &result);
        // No move needed if the same piece covers both sides, or the value is
        // already on the stack at the block entry.
        if (result.cur_cover_ == result.pred_cover_ ||
            result.cur_cover_->IsSpilled())
        auto pred_op = result.pred_cover_->GetAssignedOperand(operand_cache());
        auto cur_op = result.cur_cover_->GetAssignedOperand(operand_cache());
        ResolveControlFlow(block, cur_op, pred_block, pred_op);
1660 void RegisterAllocator::ResolveControlFlow(const InstructionBlock* block,
1661 InstructionOperand* cur_op,
1662 const InstructionBlock* pred,
1663 InstructionOperand* pred_op) {
1664 if (pred_op->Equals(cur_op)) return;
1666 GapInstruction::InnerPosition position;
1667 if (block->PredecessorCount() == 1) {
1668 gap_index = block->first_instruction_index();
1669 position = GapInstruction::START;
1671 DCHECK(pred->SuccessorCount() == 1);
1672 DCHECK(!InstructionAt(pred->last_instruction_index())->HasPointerMap());
1673 gap_index = pred->last_instruction_index() - 1;
1674 position = GapInstruction::END;
1676 AddGapMove(gap_index, position, pred_op, cur_op);
// Builds live ranges for all virtual registers by a backwards dataflow pass
// over the blocks, then patches up loop-carried liveness and applies the
// constant-use hack described inline.
void RegisterAllocator::BuildLiveRanges() {
  // Process the blocks in reverse order.
  for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
        code()->InstructionBlockAt(BasicBlock::RpoNumber::FromInt(block_id));
    auto live = ComputeLiveOut(block);
    // Initially consider all live_out values live for the entire block. We
    // will shorten these intervals if necessary.
    AddInitialIntervals(block, live);
    // Process the instructions in reverse order, generating and killing
    ProcessInstructions(block, live);
    // All phi output operands are killed by this block.
    for (auto phi : block->phis()) {
      // The live range interval already ends at the first instruction of the
      int phi_vreg = phi->virtual_register();
      live->Remove(phi_vreg);
      if (!FLAG_turbo_delay_ssa_decon) {
        InstructionOperand* hint = nullptr;
        InstructionOperand* phi_operand = nullptr;
            GetLastGap(code()->InstructionBlockAt(block->predecessors()[0]));
            gap->GetOrCreateParallelMove(GapInstruction::END, code_zone());
        // Find the gap move feeding this phi from the first predecessor and
        // use its source as the allocation hint for the phi.
        for (int j = 0; j < move->move_operands()->length(); ++j) {
          auto to = move->move_operands()->at(j).destination();
          if (to->IsUnallocated() &&
              UnallocatedOperand::cast(to)->virtual_register() == phi_vreg) {
            hint = move->move_operands()->at(j).source();
        DCHECK(hint != nullptr);
        auto block_start = LifetimePosition::FromInstructionIndex(
            block->first_instruction_index());
        Define(block_start, phi_operand, hint);
    // Now live is live_in for this block except not including values live
    // out on backward successor edges.
    live_in_sets_[block_id] = live;
    if (block->IsLoopHeader()) {
      // Add a live range stretching from the first loop instruction to the last
      // for each value live on entry to the header.
      BitVector::Iterator iterator(live);
      auto start = LifetimePosition::FromInstructionIndex(
          block->first_instruction_index());
      auto end = LifetimePosition::FromInstructionIndex(
          code()->LastLoopInstructionIndex(block)).NextInstruction();
      while (!iterator.Done()) {
        int operand_index = iterator.Current();
        auto range = LiveRangeFor(operand_index);
        range->EnsureInterval(start, end, local_zone());
      // Insert all values into the live in sets of all blocks in the loop.
      for (int i = block->rpo_number().ToInt() + 1;
           i < block->loop_end().ToInt(); ++i) {
        live_in_sets_[i]->Union(*live);
  for (auto range : live_ranges()) {
    if (range == nullptr) continue;
    range->kind_ = RequiredRegisterKind(range->id());
    // TODO(bmeurer): This is a horrible hack to make sure that for constant
    // live ranges, every use requires the constant to be in a register.
    // Without this hack, all uses with "any" policy would get the constant
    // operand assigned.
    if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
      for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next_) {
        pos->register_beneficial_ = true;
        // TODO(dcarney): should the else case assert requires_reg_ == false?
        // Can't mark phis as needing a register.
            ->InstructionAt(pos->pos().InstructionIndex())
        pos->requires_reg_ = true;
// Diagnostic pass: reports every virtual register still live on entry to the
// first block, i.e. a value used before any definition reaches it.
bool RegisterAllocator::ExistsUseWithoutDefinition() {
  BitVector::Iterator iterator(live_in_sets_[0]);
  while (!iterator.Done()) {
    int operand_index = iterator.Current();
    PrintF("Register allocator error: live v%d reached first block.\n",
    LiveRange* range = LiveRangeFor(operand_index);
    PrintF(" (first use is at %d)\n", range->first_pos()->pos().Value());
    if (debug_name() == nullptr) {
      PrintF(" (function: %s)\n", debug_name());
1793 bool RegisterAllocator::SafePointsAreInOrder() const {
1795 for (auto map : *code()->pointer_maps()) {
1796 if (safe_point > map->instruction_position()) return false;
1797 safe_point = map->instruction_position();
// Records, in every safe point's pointer map, where each live tagged value
// resides at that point (spill slot and/or assigned register), so the GC can
// find and update the references.
void RegisterAllocator::PopulatePointerMaps() {
  DCHECK(SafePointsAreInOrder());

  // Iterate over all safe point positions and record a pointer
  // for all spilled live ranges at this point.
  int last_range_start = 0;
  auto pointer_maps = code()->pointer_maps();
  PointerMapDeque::const_iterator first_it = pointer_maps->begin();
  for (LiveRange* range : live_ranges()) {
    if (range == nullptr) continue;
    // Iterate over the first parts of multi-part live ranges.
    if (range->IsChild()) continue;
    // Skip non-reference values.
    if (!HasTaggedValue(range->id())) continue;
    // Skip empty live ranges.
    if (range->IsEmpty()) continue;

    // Find the extent of the range and its children.
    int start = range->Start().InstructionIndex();
    for (auto cur = range; cur != nullptr; cur = cur->next()) {
      auto this_end = cur->End();
      if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex();
      DCHECK(cur->Start().InstructionIndex() >= start);

    // Most of the ranges are in order, but not all. Keep an eye on when they
    // step backwards and reset the first_it so we don't miss any safe points.
    if (start < last_range_start) first_it = pointer_maps->begin();
    last_range_start = start;

    // Step across all the safe points that are before the start of this range,
    // recording how far we step in order to save doing this for the next range.
    for (; first_it != pointer_maps->end(); ++first_it) {
      auto map = *first_it;
      if (map->instruction_position() >= start) break;

    // Step through the safe points to see whether they are in the range.
    for (auto it = first_it; it != pointer_maps->end(); ++it) {
      int safe_point = map->instruction_position();

      // The safe points are sorted so we can stop searching here.
      if (safe_point - 1 > end) break;

      // Advance to the next active range that covers the current
      // safe point position.
      auto safe_point_pos = LifetimePosition::FromInstructionIndex(safe_point);
      while (cur != nullptr && !cur->Covers(safe_point_pos)) {
      if (cur == nullptr) continue;

      // Check if the live range is spilled and the safe point is after
      // the spill position.
      if (range->HasSpillOperand() &&
          safe_point >= range->spill_start_index() &&
          !range->GetSpillOperand()->IsConstant()) {
        TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n",
                   range->id(), range->spill_start_index(), safe_point);
        map->RecordPointer(range->GetSpillOperand(), code_zone());

      // A value can simultaneously live in a register: record that too.
      if (!cur->IsSpilled()) {
            "Pointer in register for range %d (start at %d) "
            "at safe point %d\n",
            cur->id(), cur->Start().Value(), safe_point);
        InstructionOperand* operand = cur->GetAssignedOperand(operand_cache());
        DCHECK(!operand->IsStackSlot());
        map->RecordPointer(operand, code_zone());
1882 void RegisterAllocator::AllocateGeneralRegisters() {
1883 num_registers_ = config()->num_general_registers();
1884 mode_ = GENERAL_REGISTERS;
1885 AllocateRegisters();
1889 void RegisterAllocator::AllocateDoubleRegisters() {
1890 num_registers_ = config()->num_aliased_double_registers();
1891 mode_ = DOUBLE_REGISTERS;
1892 AllocateRegisters();
// The core linear-scan loop for the register kind selected by |mode_|:
// sorts the unhandled ranges, seeds the inactive set with the fixed
// (precolored) ranges, then repeatedly pops the range with the smallest start
// position, retires/suspends overlapping ranges, and assigns it a register
// (free if possible, otherwise by evicting/splitting).
void RegisterAllocator::AllocateRegisters() {
  DCHECK(unhandled_live_ranges().empty());

  for (auto range : live_ranges()) {
    if (range == nullptr) continue;
    if (range->Kind() == mode_) {
      AddToUnhandledUnsorted(range);

  DCHECK(UnhandledIsSorted());

  DCHECK(active_live_ranges().empty());
  DCHECK(inactive_live_ranges().empty());

  // Fixed ranges (register clobbers / fixed policies) start as inactive.
  if (mode_ == DOUBLE_REGISTERS) {
    for (int i = 0; i < config()->num_aliased_double_registers(); ++i) {
      auto current = fixed_double_live_ranges()[i];
      if (current != nullptr) {
        AddToInactive(current);
    DCHECK(mode_ == GENERAL_REGISTERS);
    for (auto current : fixed_live_ranges()) {
      if (current != nullptr) {
        AddToInactive(current);

  while (!unhandled_live_ranges().empty()) {
    DCHECK(UnhandledIsSorted());
    // The list is sorted by descending start, so the back is the next range.
    auto current = unhandled_live_ranges().back();
    unhandled_live_ranges().pop_back();
    DCHECK(UnhandledIsSorted());
    auto position = current->Start();
    allocation_finger_ = position;

    TraceAlloc("Processing interval %d start=%d\n", current->id(),

    if (!current->HasNoSpillType()) {
      TraceAlloc("Live range %d already has a spill operand\n", current->id());
      auto next_pos = position;
      if (code()->IsGapAt(next_pos.InstructionIndex())) {
        next_pos = next_pos.NextInstruction();
      auto pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
      // If the range already has a spill operand and it doesn't need a
      // register immediately, split it and spill the first part of the range.
      if (pos == nullptr) {
      } else if (pos->pos().Value() >
                 current->Start().NextInstruction().Value()) {
        // Do not spill live range eagerly if use position that can benefit from
        // the register is too close to the start of live range.
        SpillBetween(current, current->Start(), pos->pos());
        DCHECK(UnhandledIsSorted());

    if (TryReuseSpillForPhi(current)) continue;

    // Retire active ranges that ended, suspend those with a lifetime hole.
    for (size_t i = 0; i < active_live_ranges().size(); ++i) {
      auto cur_active = active_live_ranges()[i];
      if (cur_active->End().Value() <= position.Value()) {
        ActiveToHandled(cur_active);
        --i;  // The live range was removed from the list of active live ranges.
      } else if (!cur_active->Covers(position)) {
        ActiveToInactive(cur_active);
        --i;  // The live range was removed from the list of active live ranges.

    // Symmetrically, resume or retire inactive ranges.
    for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
      auto cur_inactive = inactive_live_ranges()[i];
      if (cur_inactive->End().Value() <= position.Value()) {
        InactiveToHandled(cur_inactive);
        --i;  // Live range was removed from the list of inactive live ranges.
      } else if (cur_inactive->Covers(position)) {
        InactiveToActive(cur_inactive);
        --i;  // Live range was removed from the list of inactive live ranges.

    DCHECK(!current->HasRegisterAssigned() && !current->IsSpilled());

    bool result = TryAllocateFreeReg(current);
    if (!result) AllocateBlockedReg(current);
    if (current->HasRegisterAssigned()) {
      AddToActive(current);

  active_live_ranges().clear();
  inactive_live_ranges().clear();
1999 const char* RegisterAllocator::RegisterName(int allocation_index) {
2000 if (mode_ == GENERAL_REGISTERS) {
2001 return config()->general_register_name(allocation_index);
2003 return config()->double_register_name(allocation_index);
2008 bool RegisterAllocator::HasTaggedValue(int virtual_register) const {
2009 return code()->IsReference(virtual_register);
2013 RegisterKind RegisterAllocator::RequiredRegisterKind(
2014 int virtual_register) const {
2015 return (code()->IsDouble(virtual_register)) ? DOUBLE_REGISTERS
2016 : GENERAL_REGISTERS;
2020 void RegisterAllocator::AddToActive(LiveRange* range) {
2021 TraceAlloc("Add live range %d to active\n", range->id());
2022 active_live_ranges().push_back(range);
2026 void RegisterAllocator::AddToInactive(LiveRange* range) {
2027 TraceAlloc("Add live range %d to inactive\n", range->id());
2028 inactive_live_ranges().push_back(range);
2032 void RegisterAllocator::AddToUnhandledSorted(LiveRange* range) {
2033 if (range == nullptr || range->IsEmpty()) return;
2034 DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
2035 DCHECK(allocation_finger_.Value() <= range->Start().Value());
2036 for (int i = static_cast<int>(unhandled_live_ranges().size() - 1); i >= 0;
2038 auto cur_range = unhandled_live_ranges().at(i);
2039 if (!range->ShouldBeAllocatedBefore(cur_range)) continue;
2040 TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
2041 auto it = unhandled_live_ranges().begin() + (i + 1);
2042 unhandled_live_ranges().insert(it, range);
2043 DCHECK(UnhandledIsSorted());
2046 TraceAlloc("Add live range %d to unhandled at start\n", range->id());
2047 unhandled_live_ranges().insert(unhandled_live_ranges().begin(), range);
2048 DCHECK(UnhandledIsSorted());
2052 void RegisterAllocator::AddToUnhandledUnsorted(LiveRange* range) {
2053 if (range == nullptr || range->IsEmpty()) return;
2054 DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
2055 TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id());
2056 unhandled_live_ranges().push_back(range);
2060 static bool UnhandledSortHelper(LiveRange* a, LiveRange* b) {
2061 DCHECK(!a->ShouldBeAllocatedBefore(b) || !b->ShouldBeAllocatedBefore(a));
2062 if (a->ShouldBeAllocatedBefore(b)) return false;
2063 if (b->ShouldBeAllocatedBefore(a)) return true;
2064 return a->id() < b->id();
2068 // Sort the unhandled live ranges so that the ranges to be processed first are
2069 // at the end of the array list. This is convenient for the register allocation
2070 // algorithm because it is efficient to remove elements from the end.
2071 void RegisterAllocator::SortUnhandled() {
2072 TraceAlloc("Sort unhandled\n");
2073 std::sort(unhandled_live_ranges().begin(), unhandled_live_ranges().end(),
2074 &UnhandledSortHelper);
2078 bool RegisterAllocator::UnhandledIsSorted() {
2079 size_t len = unhandled_live_ranges().size();
2080 for (size_t i = 1; i < len; i++) {
2081 auto a = unhandled_live_ranges().at(i - 1);
2082 auto b = unhandled_live_ranges().at(i);
2083 if (a->Start().Value() < b->Start().Value()) return false;
2089 void RegisterAllocator::ActiveToHandled(LiveRange* range) {
2090 RemoveElement(&active_live_ranges(), range);
2091 TraceAlloc("Moving live range %d from active to handled\n", range->id());
2095 void RegisterAllocator::ActiveToInactive(LiveRange* range) {
2096 RemoveElement(&active_live_ranges(), range);
2097 inactive_live_ranges().push_back(range);
2098 TraceAlloc("Moving live range %d from active to inactive\n", range->id());
2102 void RegisterAllocator::InactiveToHandled(LiveRange* range) {
2103 RemoveElement(&inactive_live_ranges(), range);
2104 TraceAlloc("Moving live range %d from inactive to handled\n", range->id());
2108 void RegisterAllocator::InactiveToActive(LiveRange* range) {
2109 RemoveElement(&inactive_live_ranges(), range);
2110 active_live_ranges().push_back(range);
2111 TraceAlloc("Moving live range %d from inactive to active\n", range->id());
2115 bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) {
2116 LifetimePosition free_until_pos[RegisterConfiguration::kMaxDoubleRegisters];
2118 for (int i = 0; i < num_registers_; i++) {
2119 free_until_pos[i] = LifetimePosition::MaxPosition();
2122 for (auto cur_active : active_live_ranges()) {
2123 free_until_pos[cur_active->assigned_register()] =
2124 LifetimePosition::FromInstructionIndex(0);
2127 for (auto cur_inactive : inactive_live_ranges()) {
2128 DCHECK(cur_inactive->End().Value() > current->Start().Value());
2129 auto next_intersection = cur_inactive->FirstIntersection(current);
2130 if (!next_intersection.IsValid()) continue;
2131 int cur_reg = cur_inactive->assigned_register();
2132 free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
2135 auto hint = current->FirstHint();
2136 if (hint != nullptr && (hint->IsRegister() || hint->IsDoubleRegister())) {
2137 int register_index = hint->index();
2139 "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
2140 RegisterName(register_index), free_until_pos[register_index].Value(),
2141 current->id(), current->End().Value());
2143 // The desired register is free until the end of the current live range.
2144 if (free_until_pos[register_index].Value() >= current->End().Value()) {
2145 TraceAlloc("Assigning preferred reg %s to live range %d\n",
2146 RegisterName(register_index), current->id());
2147 SetLiveRangeAssignedRegister(current, register_index);
2152 // Find the register which stays free for the longest time.
2154 for (int i = 1; i < RegisterCount(); ++i) {
2155 if (free_until_pos[i].Value() > free_until_pos[reg].Value()) {
2160 auto pos = free_until_pos[reg];
2162 if (pos.Value() <= current->Start().Value()) {
2163 // All registers are blocked.
2167 if (pos.Value() < current->End().Value()) {
2168 // Register reg is available at the range start but becomes blocked before
2169 // the range end. Split current at position where it becomes blocked.
2170 auto tail = SplitRangeAt(current, pos);
2171 AddToUnhandledSorted(tail);
2174 // Register reg is available at the range start and is free until
2176 DCHECK(pos.Value() >= current->End().Value());
2177 TraceAlloc("Assigning free reg %s to live range %d\n", RegisterName(reg),
2179 SetLiveRangeAssignedRegister(current, reg);
2185 void RegisterAllocator::AllocateBlockedReg(LiveRange* current) {
2186 auto register_use = current->NextRegisterPosition(current->Start());
2187 if (register_use == nullptr) {
2188 // There is no use in the current live range that requires a register.
2189 // We can just spill it.
2194 LifetimePosition use_pos[RegisterConfiguration::kMaxDoubleRegisters];
2195 LifetimePosition block_pos[RegisterConfiguration::kMaxDoubleRegisters];
2197 for (int i = 0; i < num_registers_; i++) {
2198 use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
2201 for (auto range : active_live_ranges()) {
2202 int cur_reg = range->assigned_register();
2203 if (range->IsFixed() || !range->CanBeSpilled(current->Start())) {
2204 block_pos[cur_reg] = use_pos[cur_reg] =
2205 LifetimePosition::FromInstructionIndex(0);
2208 range->NextUsePositionRegisterIsBeneficial(current->Start());
2209 if (next_use == nullptr) {
2210 use_pos[cur_reg] = range->End();
2212 use_pos[cur_reg] = next_use->pos();
2217 for (auto range : inactive_live_ranges()) {
2218 DCHECK(range->End().Value() > current->Start().Value());
2219 auto next_intersection = range->FirstIntersection(current);
2220 if (!next_intersection.IsValid()) continue;
2221 int cur_reg = range->assigned_register();
2222 if (range->IsFixed()) {
2223 block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
2224 use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
2226 use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
2231 for (int i = 1; i < RegisterCount(); ++i) {
2232 if (use_pos[i].Value() > use_pos[reg].Value()) {
2237 auto pos = use_pos[reg];
2239 if (pos.Value() < register_use->pos().Value()) {
2240 // All registers are blocked before the first use that requires a register.
2241 // Spill starting part of live range up to that use.
2242 SpillBetween(current, current->Start(), register_use->pos());
2246 if (block_pos[reg].Value() < current->End().Value()) {
2247 // Register becomes blocked before the current range end. Split before that
2249 LiveRange* tail = SplitBetween(current, current->Start(),
2250 block_pos[reg].InstructionStart());
2251 AddToUnhandledSorted(tail);
2254 // Register reg is not blocked for the whole range.
2255 DCHECK(block_pos[reg].Value() >= current->End().Value());
2256 TraceAlloc("Assigning blocked reg %s to live range %d\n", RegisterName(reg),
2258 SetLiveRangeAssignedRegister(current, reg);
2260 // This register was not free. Thus we need to find and spill
2261 // parts of active and inactive live regions that use the same register
2262 // at the same lifetime positions as current.
2263 SplitAndSpillIntersecting(current);
2267 static const InstructionBlock* GetContainingLoop(
2268 const InstructionSequence* sequence, const InstructionBlock* block) {
2269 auto index = block->loop_header();
2270 if (!index.IsValid()) return nullptr;
2271 return sequence->InstructionBlockAt(index);
2275 LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
2276 LiveRange* range, LifetimePosition pos) {
2277 auto block = GetInstructionBlock(pos.InstructionStart());
2279 block->IsLoopHeader() ? block : GetContainingLoop(code(), block);
2281 if (loop_header == nullptr) return pos;
2283 auto prev_use = range->PreviousUsePositionRegisterIsBeneficial(pos);
2285 while (loop_header != nullptr) {
2286 // We are going to spill live range inside the loop.
2287 // If possible try to move spilling position backwards to loop header.
2288 // This will reduce number of memory moves on the back edge.
2289 auto loop_start = LifetimePosition::FromInstructionIndex(
2290 loop_header->first_instruction_index());
2292 if (range->Covers(loop_start)) {
2293 if (prev_use == nullptr || prev_use->pos().Value() < loop_start.Value()) {
2294 // No register beneficial use inside the loop before the pos.
2299 // Try hoisting out to an outer loop.
2300 loop_header = GetContainingLoop(code(), loop_header);
2307 void RegisterAllocator::SplitAndSpillIntersecting(LiveRange* current) {
2308 DCHECK(current->HasRegisterAssigned());
2309 int reg = current->assigned_register();
2310 auto split_pos = current->Start();
2311 for (size_t i = 0; i < active_live_ranges().size(); ++i) {
2312 auto range = active_live_ranges()[i];
2313 if (range->assigned_register() == reg) {
2314 auto next_pos = range->NextRegisterPosition(current->Start());
2315 auto spill_pos = FindOptimalSpillingPos(range, split_pos);
2316 if (next_pos == nullptr) {
2317 SpillAfter(range, spill_pos);
2319 // When spilling between spill_pos and next_pos ensure that the range
2320 // remains spilled at least until the start of the current live range.
2321 // This guarantees that we will not introduce new unhandled ranges that
2322 // start before the current range as this violates allocation invariant
2323 // and will lead to an inconsistent state of active and inactive
2324 // live-ranges: ranges are allocated in order of their start positions,
2325 // ranges are retired from active/inactive when the start of the
2326 // current live-range is larger than their end.
2327 SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
2329 ActiveToHandled(range);
2334 for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
2335 auto range = inactive_live_ranges()[i];
2336 DCHECK(range->End().Value() > current->Start().Value());
2337 if (range->assigned_register() == reg && !range->IsFixed()) {
2338 LifetimePosition next_intersection = range->FirstIntersection(current);
2339 if (next_intersection.IsValid()) {
2340 UsePosition* next_pos = range->NextRegisterPosition(current->Start());
2341 if (next_pos == nullptr) {
2342 SpillAfter(range, split_pos);
2344 next_intersection = Min(next_intersection, next_pos->pos());
2345 SpillBetween(range, split_pos, next_intersection);
2347 InactiveToHandled(range);
2355 bool RegisterAllocator::IsBlockBoundary(LifetimePosition pos) {
2356 return pos.IsInstructionStart() &&
2357 code()->GetInstructionBlock(pos.InstructionIndex())->code_start() ==
2358 pos.InstructionIndex();
2362 LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
2363 LifetimePosition pos) {
2364 DCHECK(!range->IsFixed());
2365 TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
2367 if (pos.Value() <= range->Start().Value()) return range;
2369 // We can't properly connect liveranges if split occured at the end
2370 // of control instruction.
2371 DCHECK(pos.IsInstructionStart() ||
2372 !InstructionAt(pos.InstructionIndex())->IsControl());
2374 int vreg = GetVirtualRegister();
2375 auto result = LiveRangeFor(vreg);
2376 range->SplitAt(pos, result, local_zone());
2381 LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
2382 LifetimePosition start,
2383 LifetimePosition end) {
2384 DCHECK(!range->IsFixed());
2385 TraceAlloc("Splitting live range %d in position between [%d, %d]\n",
2386 range->id(), start.Value(), end.Value());
2388 auto split_pos = FindOptimalSplitPos(start, end);
2389 DCHECK(split_pos.Value() >= start.Value());
2390 return SplitRangeAt(range, split_pos);
2394 LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
2395 LifetimePosition end) {
2396 int start_instr = start.InstructionIndex();
2397 int end_instr = end.InstructionIndex();
2398 DCHECK(start_instr <= end_instr);
2400 // We have no choice
2401 if (start_instr == end_instr) return end;
2403 auto start_block = GetInstructionBlock(start);
2404 auto end_block = GetInstructionBlock(end);
2406 if (end_block == start_block) {
2407 // The interval is split in the same basic block. Split at the latest
2408 // possible position.
2412 auto block = end_block;
2413 // Find header of outermost loop.
2414 // TODO(titzer): fix redundancy below.
2415 while (GetContainingLoop(code(), block) != nullptr &&
2416 GetContainingLoop(code(), block)->rpo_number().ToInt() >
2417 start_block->rpo_number().ToInt()) {
2418 block = GetContainingLoop(code(), block);
2421 // We did not find any suitable outer loop. Split at the latest possible
2422 // position unless end_block is a loop header itself.
2423 if (block == end_block && !end_block->IsLoopHeader()) return end;
2425 return LifetimePosition::FromInstructionIndex(
2426 block->first_instruction_index());
2430 void RegisterAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
2431 auto second_part = SplitRangeAt(range, pos);
2436 void RegisterAllocator::SpillBetween(LiveRange* range, LifetimePosition start,
2437 LifetimePosition end) {
2438 SpillBetweenUntil(range, start, start, end);
2442 void RegisterAllocator::SpillBetweenUntil(LiveRange* range,
2443 LifetimePosition start,
2444 LifetimePosition until,
2445 LifetimePosition end) {
2446 CHECK(start.Value() < end.Value());
2447 auto second_part = SplitRangeAt(range, start);
2449 if (second_part->Start().Value() < end.Value()) {
2450 // The split result intersects with [start, end[.
2451 // Split it at position between ]start+1, end[, spill the middle part
2452 // and put the rest to unhandled.
2453 auto third_part_end = end.PrevInstruction().InstructionEnd();
2454 if (IsBlockBoundary(end.InstructionStart())) {
2455 third_part_end = end.InstructionStart();
2457 auto third_part = SplitBetween(
2458 second_part, Max(second_part->Start().InstructionEnd(), until),
2461 DCHECK(third_part != second_part);
2464 AddToUnhandledSorted(third_part);
2466 // The split result does not intersect with [start, end[.
2467 // Nothing to spill. Just put it to unhandled as whole.
2468 AddToUnhandledSorted(second_part);
2473 void RegisterAllocator::Spill(LiveRange* range) {
2474 DCHECK(!range->IsSpilled());
2475 TraceAlloc("Spilling live range %d\n", range->id());
2476 auto first = range->TopLevel();
2477 if (first->HasNoSpillType()) {
2478 AssignSpillRangeToLiveRange(first);
2480 range->MakeSpilled();
2484 int RegisterAllocator::RegisterCount() const { return num_registers_; }
2490 void RegisterAllocator::Verify() const {
2491 for (auto current : live_ranges()) {
2492 if (current != nullptr) current->Verify();
2500 void RegisterAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
2502 if (range->Kind() == DOUBLE_REGISTERS) {
2503 assigned_double_registers_->Add(reg);
2505 DCHECK(range->Kind() == GENERAL_REGISTERS);
2506 assigned_registers_->Add(reg);
2508 range->set_assigned_register(reg, operand_cache());
2511 } // namespace compiler
2512 } // namespace internal