From 497c537310d72064a23bb0ea3e4e708460b7dbf4 Mon Sep 17 00:00:00 2001 From: dcarney Date: Mon, 20 Apr 2015 09:15:34 -0700 Subject: [PATCH] [turbofan] split register allocator into little pieces R=titzer@chromium.org BUG= Review URL: https://codereview.chromium.org/1094063002 Cr-Commit-Position: refs/heads/master@{#27946} --- src/compiler/graph-visualizer.cc | 17 +- src/compiler/graph-visualizer.h | 15 +- src/compiler/pipeline.cc | 94 +- src/compiler/register-allocator.cc | 1761 ++++++++++++++++++------------------ src/compiler/register-allocator.h | 365 +++++--- 5 files changed, 1201 insertions(+), 1051 deletions(-) diff --git a/src/compiler/graph-visualizer.cc b/src/compiler/graph-visualizer.cc index 0d041ec..173db9c 100644 --- a/src/compiler/graph-visualizer.cc +++ b/src/compiler/graph-visualizer.cc @@ -407,7 +407,7 @@ class GraphC1Visualizer { void PrintSchedule(const char* phase, const Schedule* schedule, const SourcePositionTable* positions, const InstructionSequence* instructions); - void PrintAllocator(const char* phase, const RegisterAllocator* allocator); + void PrintLiveRanges(const char* phase, const RegisterAllocationData* data); Zone* zone() const { return zone_; } private: @@ -693,20 +693,20 @@ void GraphC1Visualizer::PrintSchedule(const char* phase, } -void GraphC1Visualizer::PrintAllocator(const char* phase, - const RegisterAllocator* allocator) { +void GraphC1Visualizer::PrintLiveRanges(const char* phase, + const RegisterAllocationData* data) { Tag tag(this, "intervals"); PrintStringProperty("name", phase); - for (auto range : allocator->fixed_double_live_ranges()) { + for (auto range : data->fixed_double_live_ranges()) { PrintLiveRange(range, "fixed"); } - for (auto range : allocator->fixed_live_ranges()) { + for (auto range : data->fixed_live_ranges()) { PrintLiveRange(range, "fixed"); } - for (auto range : allocator->live_ranges()) { + for (auto range : data->live_ranges()) { PrintLiveRange(range, "object"); } } @@ -791,9 +791,10 @@ std::ostream& operator<<(std::ostream& os, const AsC1V& ac) { } -std::ostream& operator<<(std::ostream& os, const AsC1VAllocator& ac) { +std::ostream& operator<<(std::ostream& os, + const AsC1VRegisterAllocationData& ac) { Zone tmp_zone; - GraphC1Visualizer(os, &tmp_zone).PrintAllocator(ac.phase_, ac.allocator_); + GraphC1Visualizer(os, &tmp_zone).PrintLiveRanges(ac.phase_, ac.data_); return os; } diff --git a/src/compiler/graph-visualizer.h b/src/compiler/graph-visualizer.h index 17094c2..10ea538 100644 --- a/src/compiler/graph-visualizer.h +++ b/src/compiler/graph-visualizer.h @@ -16,7 +16,7 @@ namespace compiler { class Graph; class InstructionSequence; -class RegisterAllocator; +class RegisterAllocationData; class Schedule; class SourcePositionTable; @@ -67,18 +67,19 @@ struct AsC1V { const char* phase_; }; -struct AsC1VAllocator { - explicit AsC1VAllocator(const char* phase, - const RegisterAllocator* allocator = NULL) - : phase_(phase), allocator_(allocator) {} +struct AsC1VRegisterAllocationData { + explicit AsC1VRegisterAllocationData( + const char* phase, const RegisterAllocationData* data = nullptr) + : phase_(phase), data_(data) {} const char* phase_; - const RegisterAllocator* allocator_; + const RegisterAllocationData* data_; }; std::ostream& operator<<(std::ostream& os, const AsDOT& ad); std::ostream& operator<<(std::ostream& os, const AsC1VCompilation& ac); std::ostream& operator<<(std::ostream& os, const AsC1V& ac); -std::ostream& operator<<(std::ostream& os, const AsC1VAllocator& ac); +std::ostream& operator<<(std::ostream& 
os, + const AsC1VRegisterAllocationData& ac); } // namespace compiler } // namespace internal diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc index 5d8135e..ef1c83f 100644 --- a/src/compiler/pipeline.cc +++ b/src/compiler/pipeline.cc @@ -81,7 +81,9 @@ class PipelineData { instruction_zone_(instruction_zone_scope_.zone()), sequence_(nullptr), frame_(nullptr), - register_allocator_(nullptr) { + register_allocation_zone_scope_(zone_pool_), + register_allocation_zone_(register_allocation_zone_scope_.zone()), + register_allocation_data_(nullptr) { PhaseScope scope(pipeline_statistics, "init pipeline data"); graph_ = new (graph_zone_) Graph(graph_zone_); source_positions_.Reset(new SourcePositionTable(graph_)); @@ -121,7 +123,9 @@ class PipelineData { instruction_zone_(instruction_zone_scope_.zone()), sequence_(nullptr), frame_(nullptr), - register_allocator_(nullptr) {} + register_allocation_zone_scope_(zone_pool_), + register_allocation_zone_(register_allocation_zone_scope_.zone()), + register_allocation_data_(nullptr) {} // For register allocation testing entry point. PipelineData(ZonePool* zone_pool, CompilationInfo* info, @@ -148,9 +152,12 @@ class PipelineData { instruction_zone_(sequence->zone()), sequence_(sequence), frame_(nullptr), - register_allocator_(nullptr) {} + register_allocation_zone_scope_(zone_pool_), + register_allocation_zone_(register_allocation_zone_scope_.zone()), + register_allocation_data_(nullptr) {} ~PipelineData() { + DeleteRegisterAllocationZone(); DeleteInstructionZone(); DeleteGraphZone(); } @@ -200,7 +207,11 @@ class PipelineData { Zone* instruction_zone() const { return instruction_zone_; } InstructionSequence* sequence() const { return sequence_; } Frame* frame() const { return frame_; } - RegisterAllocator* register_allocator() const { return register_allocator_; } + + Zone* register_allocation_zone() const { return register_allocation_zone_; } + RegisterAllocationData* register_allocation_data() const { + return register_allocation_data_; + } void DeleteGraphZone() { // Destroy objects with destructors first. 
@@ -226,11 +237,17 @@ class PipelineData { instruction_zone_ = nullptr; sequence_ = nullptr; frame_ = nullptr; - register_allocator_ = nullptr; + } + + void DeleteRegisterAllocationZone() { + if (register_allocation_zone_ == nullptr) return; + register_allocation_zone_scope_.Destroy(); + register_allocation_zone_ = nullptr; + register_allocation_data_ = nullptr; } void InitializeInstructionSequence() { - DCHECK(!sequence_); + DCHECK(sequence_ == nullptr); InstructionBlocks* instruction_blocks = InstructionSequence::InstructionBlocksFor(instruction_zone(), schedule()); @@ -238,14 +255,14 @@ class PipelineData { info()->isolate(), instruction_zone(), instruction_blocks); } - void InitializeRegisterAllocator(Zone* local_zone, - const RegisterConfiguration* config, - const char* debug_name) { - DCHECK(!register_allocator_); - DCHECK(!frame_); + void InitializeLiveRangeBuilder(const RegisterConfiguration* config, + const char* debug_name) { + DCHECK(frame_ == nullptr); + DCHECK(register_allocation_data_ == nullptr); frame_ = new (instruction_zone()) Frame(); - register_allocator_ = new (instruction_zone()) - RegisterAllocator(config, local_zone, frame(), sequence(), debug_name); + register_allocation_data_ = new (register_allocation_zone()) + RegisterAllocationData(config, register_allocation_zone(), frame(), + sequence(), debug_name); } private: @@ -281,7 +298,13 @@ class PipelineData { Zone* instruction_zone_; InstructionSequence* sequence_; Frame* frame_; - RegisterAllocator* register_allocator_; + + // All objects in the following group of fields are allocated in + // register_allocation_zone_. They are all set to NULL when the zone is + // destroyed. + ZonePool::Scope register_allocation_zone_scope_; + Zone* register_allocation_zone_; + RegisterAllocationData* register_allocation_data_; DISALLOW_COPY_AND_ASSIGN(PipelineData); }; @@ -693,7 +716,8 @@ struct MeetRegisterConstraintsPhase { static const char* phase_name() { return "meet register constraints"; } void Run(PipelineData* data, Zone* temp_zone) { - data->register_allocator()->MeetRegisterConstraints(); + LiveRangeBuilder builder(data->register_allocation_data()); + builder.MeetRegisterConstraints(); } }; @@ -702,7 +726,8 @@ struct ResolvePhisPhase { static const char* phase_name() { return "resolve phis"; } void Run(PipelineData* data, Zone* temp_zone) { - data->register_allocator()->ResolvePhis(); + LiveRangeBuilder builder(data->register_allocation_data()); + builder.ResolvePhis(); } }; @@ -711,7 +736,8 @@ struct BuildLiveRangesPhase { static const char* phase_name() { return "build live ranges"; } void Run(PipelineData* data, Zone* temp_zone) { - data->register_allocator()->BuildLiveRanges(); + LiveRangeBuilder builder(data->register_allocation_data()); + builder.BuildLiveRanges(); } }; @@ -720,7 +746,9 @@ struct AllocateGeneralRegistersPhase { static const char* phase_name() { return "allocate general registers"; } void Run(PipelineData* data, Zone* temp_zone) { - data->register_allocator()->AllocateGeneralRegisters(); + LinearScanAllocator allocator(data->register_allocation_data(), + GENERAL_REGISTERS); + allocator.AllocateRegisters(); } }; @@ -729,7 +757,9 @@ struct AllocateDoubleRegistersPhase { static const char* phase_name() { return "allocate double registers"; } void Run(PipelineData* data, Zone* temp_zone) { - data->register_allocator()->AllocateDoubleRegisters(); + LinearScanAllocator allocator(data->register_allocation_data(), + DOUBLE_REGISTERS); + allocator.AllocateRegisters(); } }; @@ -738,7 +768,8 @@ struct 
AssignSpillSlotsPhase { static const char* phase_name() { return "assign spill slots"; } void Run(PipelineData* data, Zone* temp_zone) { - data->register_allocator()->AssignSpillSlots(); + OperandAssigner assigner(data->register_allocation_data()); + assigner.AssignSpillSlots(); } }; @@ -747,7 +778,8 @@ struct CommitAssignmentPhase { static const char* phase_name() { return "commit assignment"; } void Run(PipelineData* data, Zone* temp_zone) { - data->register_allocator()->CommitAssignment(); + OperandAssigner assigner(data->register_allocation_data()); + assigner.CommitAssignment(); } }; @@ -756,7 +788,8 @@ struct PopulateReferenceMapsPhase { static const char* phase_name() { return "populate pointer maps"; } void Run(PipelineData* data, Zone* temp_zone) { - data->register_allocator()->PopulateReferenceMaps(); + ReferenceMapPopulator populator(data->register_allocation_data()); + populator.PopulateReferenceMaps(); } }; @@ -765,7 +798,8 @@ struct ConnectRangesPhase { static const char* phase_name() { return "connect ranges"; } void Run(PipelineData* data, Zone* temp_zone) { - data->register_allocator()->ConnectRanges(); + LiveRangeConnector connector(data->register_allocation_data()); + connector.ConnectRanges(temp_zone); } }; @@ -774,7 +808,8 @@ struct ResolveControlFlowPhase { static const char* phase_name() { return "resolve control flow"; } void Run(PipelineData* data, Zone* temp_zone) { - data->register_allocator()->ResolveControlFlow(); + LiveRangeConnector connector(data->register_allocation_data()); + connector.ResolveControlFlow(); } }; @@ -1216,9 +1251,7 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config, debug_name = GetDebugName(data->info()); #endif - ZonePool::Scope zone_scope(data->zone_pool()); - data->InitializeRegisterAllocator(zone_scope.zone(), config, - debug_name.get()); + data->InitializeLiveRangeBuilder(config, debug_name.get()); if (info()->is_osr()) { OsrHelper osr_helper(info()); osr_helper.SetupFrame(data->frame()); @@ -1234,7 +1267,7 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config, << printable; } if (verifier != nullptr) { - CHECK(!data->register_allocator()->ExistsUseWithoutDefinition()); + CHECK(!data->register_allocation_data()->ExistsUseWithoutDefinition()); } Run<AllocateGeneralRegistersPhase>(); Run<AllocateDoubleRegistersPhase>(); @@ -1262,8 +1295,11 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config, if (FLAG_trace_turbo && !data->MayHaveUnverifiableGraph()) { TurboCfgFile tcf(data->isolate()); - tcf << AsC1VAllocator("CodeGen", data->register_allocator()); + tcf << AsC1VRegisterAllocationData("CodeGen", + data->register_allocation_data()); } + + data->DeleteRegisterAllocationZone(); } } // namespace compiler diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc index 9d745d0..ec4120e 100644 --- a/src/compiler/register-allocator.cc +++ b/src/compiler/register-allocator.cc @@ -15,22 +15,26 @@ namespace compiler { if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \ } while (false) -static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) { +namespace { + +inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) { return a.Value() < b.Value() ? a : b; } -static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) { +inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) { return a.Value() > b.Value() ?
a : b; } -static void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) { +void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) { auto it = std::find(v->begin(), v->end(), range); DCHECK(it != v->end()); v->erase(it); } +} // namespace + UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand, InstructionOperand* hint) @@ -85,14 +89,33 @@ struct LiveRange::SpillAtDefinitionList : ZoneObject { }; -#ifdef DEBUG +LiveRange::LiveRange(int id, Zone* zone) + : id_(id), + spilled_(false), + has_slot_use_(false), + is_phi_(false), + is_non_loop_phi_(false), + kind_(UNALLOCATED_REGISTERS), + assigned_register_(kInvalidAssignment), + last_interval_(nullptr), + first_interval_(nullptr), + first_pos_(nullptr), + parent_(nullptr), + next_(nullptr), + current_interval_(nullptr), + last_processed_use_(nullptr), + current_hint_operand_(nullptr), + spill_start_index_(kMaxInt), + spill_type_(SpillType::kNoSpillType), + spill_operand_(nullptr), + spills_at_definition_(nullptr) {} void LiveRange::Verify() const { UsePosition* cur = first_pos_; while (cur != nullptr) { - DCHECK(Start().Value() <= cur->pos().Value() && - cur->pos().Value() <= End().Value()); + CHECK(Start().Value() <= cur->pos().Value() && + cur->pos().Value() <= End().Value()); cur = cur->next(); } } @@ -112,31 +135,6 @@ bool LiveRange::HasOverlap(UseInterval* target) const { } -#endif - - -LiveRange::LiveRange(int id, Zone* zone) - : id_(id), - spilled_(false), - has_slot_use_(false), - is_phi_(false), - is_non_loop_phi_(false), - kind_(UNALLOCATED_REGISTERS), - assigned_register_(kInvalidAssignment), - last_interval_(nullptr), - first_interval_(nullptr), - first_pos_(nullptr), - parent_(nullptr), - next_(nullptr), - current_interval_(nullptr), - last_processed_use_(nullptr), - current_hint_operand_(nullptr), - spill_start_index_(kMaxInt), - spill_type_(SpillType::kNoSpillType), - spill_operand_(nullptr), - spills_at_definition_(nullptr) {} - - void LiveRange::set_assigned_register(int reg) { DCHECK(!HasRegisterAssigned() && !IsSpilled()); assigned_register_ = reg; @@ -571,244 +569,6 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) { } -RegisterAllocator::RegisterAllocator(const RegisterConfiguration* config, - Zone* zone, Frame* frame, - InstructionSequence* code, - const char* debug_name) - : local_zone_(zone), - frame_(frame), - code_(code), - debug_name_(debug_name), - config_(config), - phi_map_(local_zone()), - live_in_sets_(code->InstructionBlockCount(), nullptr, local_zone()), - live_ranges_(code->VirtualRegisterCount() * 2, nullptr, local_zone()), - fixed_live_ranges_(this->config()->num_general_registers(), nullptr, - local_zone()), - fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr, - local_zone()), - unhandled_live_ranges_(local_zone()), - active_live_ranges_(local_zone()), - inactive_live_ranges_(local_zone()), - spill_ranges_(local_zone()), - mode_(UNALLOCATED_REGISTERS), - num_registers_(-1) { - DCHECK(this->config()->num_general_registers() <= - RegisterConfiguration::kMaxGeneralRegisters); - DCHECK(this->config()->num_double_registers() <= - RegisterConfiguration::kMaxDoubleRegisters); - // TryAllocateFreeReg and AllocateBlockedReg assume this - // when allocating local arrays.
- DCHECK(RegisterConfiguration::kMaxDoubleRegisters >= - this->config()->num_general_registers()); - unhandled_live_ranges().reserve( - static_cast<size_t>(code->VirtualRegisterCount() * 2)); - active_live_ranges().reserve(8); - inactive_live_ranges().reserve(8); - spill_ranges().reserve(8); - assigned_registers_ = - new (code_zone()) BitVector(config->num_general_registers(), code_zone()); - assigned_double_registers_ = new (code_zone()) - BitVector(config->num_aliased_double_registers(), code_zone()); - frame->SetAllocatedRegisters(assigned_registers_); - frame->SetAllocatedDoubleRegisters(assigned_double_registers_); -} - - -BitVector* RegisterAllocator::ComputeLiveOut(const InstructionBlock* block) { - // Compute live out for the given block, except not including backward - // successor edges. - auto live_out = new (local_zone()) - BitVector(code()->VirtualRegisterCount(), local_zone()); - - // Process all successor blocks. - for (auto succ : block->successors()) { - // Add values live on entry to the successor. Note the successor's - // live_in will not be computed yet for backwards edges. - auto live_in = live_in_sets_[succ.ToSize()]; - if (live_in != nullptr) live_out->Union(*live_in); - - // All phi input operands corresponding to this successor edge are live - // out from this block. - auto successor = code()->InstructionBlockAt(succ); - size_t index = successor->PredecessorIndexOf(block->rpo_number()); - DCHECK(index < successor->PredecessorCount()); - for (auto phi : successor->phis()) { - live_out->Add(phi->operands()[index]); - } - } - return live_out; -} - - -void RegisterAllocator::AddInitialIntervals(const InstructionBlock* block, - BitVector* live_out) { - // Add an interval that includes the entire block to the live range for - // each live_out value.
- auto start = LifetimePosition::GapFromInstructionIndex( - block->first_instruction_index()); - auto end = LifetimePosition::InstructionFromInstructionIndex( - block->last_instruction_index()).NextStart(); - BitVector::Iterator iterator(live_out); - while (!iterator.Done()) { - int operand_index = iterator.Current(); - auto range = LiveRangeFor(operand_index); - range->AddUseInterval(start, end, local_zone()); - iterator.Advance(); - } -} - - -int RegisterAllocator::FixedDoubleLiveRangeID(int index) { - return -index - 1 - config()->num_general_registers(); -} - - -InstructionOperand* RegisterAllocator::AllocateFixed( - UnallocatedOperand* operand, int pos, bool is_tagged) { - TRACE("Allocating fixed reg for op %d\n", operand->virtual_register()); - DCHECK(operand->HasFixedPolicy()); - InstructionOperand allocated; - if (operand->HasFixedSlotPolicy()) { - allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT, - operand->fixed_slot_index()); - } else if (operand->HasFixedRegisterPolicy()) { - allocated = AllocatedOperand(AllocatedOperand::REGISTER, - operand->fixed_register_index()); - } else if (operand->HasFixedDoubleRegisterPolicy()) { - allocated = AllocatedOperand(AllocatedOperand::DOUBLE_REGISTER, - operand->fixed_register_index()); - } else { - UNREACHABLE(); - } - InstructionOperand::ReplaceWith(operand, &allocated); - if (is_tagged) { - TRACE("Fixed reg is tagged at %d\n", pos); - auto instr = InstructionAt(pos); - if (instr->HasReferenceMap()) { - instr->reference_map()->RecordReference(*operand); - } - } - return operand; -} - - -LiveRange* RegisterAllocator::NewLiveRange(int index) { - // The LiveRange object itself can go in the local zone, but the - // InstructionOperand needs to go in the code zone, since it may survive - // register allocation. 
- return new (local_zone()) LiveRange(index, code_zone()); -} - - -LiveRange* RegisterAllocator::FixedLiveRangeFor(int index) { - DCHECK(index < config()->num_general_registers()); - auto result = fixed_live_ranges()[index]; - if (result == nullptr) { - result = NewLiveRange(FixedLiveRangeID(index)); - DCHECK(result->IsFixed()); - result->kind_ = GENERAL_REGISTERS; - SetLiveRangeAssignedRegister(result, index); - fixed_live_ranges()[index] = result; - } - return result; -} - - -LiveRange* RegisterAllocator::FixedDoubleLiveRangeFor(int index) { - DCHECK(index < config()->num_aliased_double_registers()); - auto result = fixed_double_live_ranges()[index]; - if (result == nullptr) { - result = NewLiveRange(FixedDoubleLiveRangeID(index)); - DCHECK(result->IsFixed()); - result->kind_ = DOUBLE_REGISTERS; - SetLiveRangeAssignedRegister(result, index); - fixed_double_live_ranges()[index] = result; - } - return result; -} - - -LiveRange* RegisterAllocator::LiveRangeFor(int index) { - if (index >= static_cast<int>(live_ranges().size())) { - live_ranges().resize(index + 1, nullptr); - } - auto result = live_ranges()[index]; - if (result == nullptr) { - result = NewLiveRange(index); - live_ranges()[index] = result; - } - return result; -} - - -Instruction* RegisterAllocator::GetLastInstruction( - const InstructionBlock* block) { - return code()->InstructionAt(block->last_instruction_index()); } - - -LiveRange* RegisterAllocator::LiveRangeFor(InstructionOperand* operand) { - if (operand->IsUnallocated()) { - return LiveRangeFor(UnallocatedOperand::cast(operand)->virtual_register()); - } else if (operand->IsConstant()) { - return LiveRangeFor(ConstantOperand::cast(operand)->virtual_register()); - } else if (operand->IsRegister()) { - return FixedLiveRangeFor(RegisterOperand::cast(operand)->index()); - } else if (operand->IsDoubleRegister()) { - return FixedDoubleLiveRangeFor( - DoubleRegisterOperand::cast(operand)->index()); - } else { - return nullptr; - } -} - - -void RegisterAllocator::Define(LifetimePosition position, - InstructionOperand* operand, - InstructionOperand* hint) { - auto range = LiveRangeFor(operand); - if (range == nullptr) return; - - if (range->IsEmpty() || range->Start().Value() > position.Value()) { - // Can happen if there is a definition without use.
- range->AddUseInterval(position, position.NextStart(), local_zone()); - range->AddUsePosition(position.NextStart(), nullptr, nullptr, local_zone()); - } else { - range->ShortenTo(position); - } - - if (operand->IsUnallocated()) { - auto unalloc_operand = UnallocatedOperand::cast(operand); - range->AddUsePosition(position, unalloc_operand, hint, local_zone()); - } -} - - -void RegisterAllocator::Use(LifetimePosition block_start, - LifetimePosition position, - InstructionOperand* operand, - InstructionOperand* hint) { - auto range = LiveRangeFor(operand); - if (range == nullptr) return; - if (operand->IsUnallocated()) { - UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand); - range->AddUsePosition(position, unalloc_operand, hint, local_zone()); - } - range->AddUseInterval(block_start, position, local_zone()); -} - - -MoveOperands* RegisterAllocator::AddGapMove(int index, - Instruction::GapPosition position, - const InstructionOperand& from, - const InstructionOperand& to) { - auto instr = code()->InstructionAt(index); - auto moves = instr->GetOrCreateParallelMove(position, code_zone()); - return moves->AddMove(from, to); -} - - static bool AreUseIntervalsIntersecting(UseInterval* interval1, UseInterval* interval2) { while (interval1 != nullptr && interval2 != nullptr) { @@ -920,142 +680,295 @@ void SpillRange::MergeDisjointIntervals(UseInterval* other) { } -void RegisterAllocator::AssignSpillSlots() { - // Merge disjoint spill ranges - for (size_t i = 0; i < spill_ranges().size(); i++) { - auto range = spill_ranges()[i]; - if (range->IsEmpty()) continue; - for (size_t j = i + 1; j < spill_ranges().size(); j++) { - auto other = spill_ranges()[j]; - if (!other->IsEmpty()) { - range->TryMerge(other); - } - } - } - - // Allocate slots for the merged spill ranges. - for (auto range : spill_ranges()) { - if (range->IsEmpty()) continue; - // Allocate a new operand referring to the spill slot. - auto kind = range->Kind(); - int index = frame()->AllocateSpillSlot(kind == DOUBLE_REGISTERS); - auto op_kind = kind == DOUBLE_REGISTERS - ? 
AllocatedOperand::DOUBLE_STACK_SLOT - : AllocatedOperand::STACK_SLOT; - auto op = AllocatedOperand::New(code_zone(), op_kind, index); - range->SetOperand(op); - } +RegisterAllocationData::RegisterAllocationData( + const RegisterConfiguration* config, Zone* zone, Frame* frame, + InstructionSequence* code, const char* debug_name) + : allocation_zone_(zone), + frame_(frame), + code_(code), + debug_name_(debug_name), + config_(config), + phi_map_(allocation_zone()), + live_in_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()), + live_ranges_(code->VirtualRegisterCount() * 2, nullptr, + allocation_zone()), + fixed_live_ranges_(this->config()->num_general_registers(), nullptr, + allocation_zone()), + fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr, + allocation_zone()), + spill_ranges_(allocation_zone()), + assigned_registers_(nullptr), + assigned_double_registers_(nullptr) { + DCHECK(this->config()->num_general_registers() <= + RegisterConfiguration::kMaxGeneralRegisters); + DCHECK(this->config()->num_double_registers() <= + RegisterConfiguration::kMaxDoubleRegisters); + spill_ranges().reserve(8); + assigned_registers_ = new (code_zone()) + BitVector(this->config()->num_general_registers(), code_zone()); + assigned_double_registers_ = new (code_zone()) + BitVector(this->config()->num_aliased_double_registers(), code_zone()); + this->frame()->SetAllocatedRegisters(assigned_registers_); + this->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_); } -void RegisterAllocator::CommitAssignment() { - for (auto range : live_ranges()) { - if (range == nullptr || range->IsEmpty()) continue; - InstructionOperand* spill_operand = nullptr; - if (!range->TopLevel()->HasNoSpillType()) { - spill_operand = range->TopLevel()->GetSpillOperand(); - } - auto assigned = range->GetAssignedOperand(); - range->ConvertUsesToOperand(assigned, spill_operand); - if (range->is_phi()) AssignPhiInput(range, assigned); - if (!range->IsChild() && spill_operand != nullptr) { - range->CommitSpillsAtDefinition(code(), spill_operand, - range->has_slot_use()); +LiveRange* RegisterAllocationData::LiveRangeFor(int index) { + if (index >= static_cast<int>(live_ranges().size())) { + live_ranges().resize(index + 1, nullptr); + } + auto result = live_ranges()[index]; + if (result == nullptr) { + result = NewLiveRange(index); + live_ranges()[index] = result; + } + return result; +} + + +MoveOperands* RegisterAllocationData::AddGapMove( + int index, Instruction::GapPosition position, + const InstructionOperand& from, const InstructionOperand& to) { + auto instr = code()->InstructionAt(index); + auto moves = instr->GetOrCreateParallelMove(position, code_zone()); + return moves->AddMove(from, to); +} + + +void RegisterAllocationData::AssignPhiInput( + LiveRange* range, const InstructionOperand& assignment) { + DCHECK(range->is_phi()); + auto it = phi_map().find(range->id()); + DCHECK(it != phi_map().end()); + for (auto move : it->second->incoming_moves) { + move->set_destination(assignment); + } +} + + +LiveRange* RegisterAllocationData::NewLiveRange(int index) { + // The LiveRange object itself can go in the local zone, but the + // InstructionOperand needs to go in the code zone, since it may survive + // register allocation.
+ return new (allocation_zone()) LiveRange(index, code_zone()); +} + + +bool RegisterAllocationData::ExistsUseWithoutDefinition() { + bool found = false; + BitVector::Iterator iterator(live_in_sets()[0]); + while (!iterator.Done()) { + found = true; + int operand_index = iterator.Current(); + PrintF("Register allocator error: live v%d reached first block.\n", + operand_index); + LiveRange* range = LiveRangeFor(operand_index); + PrintF(" (first use is at %d)\n", range->first_pos()->pos().Value()); + if (debug_name() == nullptr) { + PrintF("\n"); + } else { + PrintF(" (function: %s)\n", debug_name()); } + iterator.Advance(); } + return found; } -SpillRange* RegisterAllocator::AssignSpillRangeToLiveRange(LiveRange* range) { - auto spill_range = new (local_zone()) SpillRange(range, local_zone()); +SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange( + LiveRange* range) { + auto spill_range = + new (allocation_zone()) SpillRange(range, allocation_zone()); spill_ranges().push_back(spill_range); return spill_range; } -bool RegisterAllocator::TryReuseSpillForPhi(LiveRange* range) { - if (range->IsChild() || !range->is_phi()) return false; - DCHECK(!range->HasSpillOperand()); +void RegisterAllocationData::SetLiveRangeAssignedRegister(LiveRange* range, + int reg) { + if (range->Kind() == DOUBLE_REGISTERS) { + assigned_double_registers_->Add(reg); + } else { + DCHECK(range->Kind() == GENERAL_REGISTERS); + assigned_registers_->Add(reg); + } + range->set_assigned_register(reg); + auto assignment = range->GetAssignedOperand(); + // TODO(dcarney): stop aliasing hint operands. + range->ConvertUsesToOperand(assignment, nullptr); + if (range->is_phi()) AssignPhiInput(range, assignment); +} - auto lookup = phi_map_.find(range->id()); - DCHECK(lookup != phi_map_.end()); - auto phi = lookup->second->phi; - auto block = lookup->second->block; - // Count the number of spilled operands. - size_t spilled_count = 0; - LiveRange* first_op = nullptr; - for (size_t i = 0; i < phi->operands().size(); i++) { - int op = phi->operands()[i]; - LiveRange* op_range = LiveRangeFor(op); - if (op_range->GetSpillRange() == nullptr) continue; - auto pred = code()->InstructionBlockAt(block->predecessors()[i]); - auto pred_end = LifetimePosition::InstructionFromInstructionIndex( - pred->last_instruction_index()); - while (op_range != nullptr && !op_range->CanCover(pred_end)) { - op_range = op_range->next(); - } - if (op_range != nullptr && op_range->IsSpilled()) { - spilled_count++; - if (first_op == nullptr) { - first_op = op_range->TopLevel(); - } + +LiveRangeBuilder::LiveRangeBuilder(RegisterAllocationData* data) + : data_(data) {} + + +BitVector* LiveRangeBuilder::ComputeLiveOut(const InstructionBlock* block) { + // Compute live out for the given block, except not including backward + // successor edges. + auto live_out = new (allocation_zone()) + BitVector(code()->VirtualRegisterCount(), allocation_zone()); + + // Process all successor blocks. + for (auto succ : block->successors()) { + // Add values live on entry to the successor. Note the successor's + // live_in will not be computed yet for backwards edges. + auto live_in = live_in_sets()[succ.ToSize()]; + if (live_in != nullptr) live_out->Union(*live_in); + + // All phi input operands corresponding to this successor edge are live + // out from this block. 
+ auto successor = code()->InstructionBlockAt(succ); + size_t index = successor->PredecessorIndexOf(block->rpo_number()); + DCHECK(index < successor->PredecessorCount()); + for (auto phi : successor->phis()) { + live_out->Add(phi->operands()[index]); } } + return live_out; +} - // Only continue if more than half of the operands are spilled. - if (spilled_count * 2 <= phi->operands().size()) { - return false; + +void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block, + BitVector* live_out) { + // Add an interval that includes the entire block to the live range for + // each live_out value. + auto start = LifetimePosition::GapFromInstructionIndex( + block->first_instruction_index()); + auto end = LifetimePosition::InstructionFromInstructionIndex( + block->last_instruction_index()).NextStart(); + BitVector::Iterator iterator(live_out); + while (!iterator.Done()) { + int operand_index = iterator.Current(); + auto range = LiveRangeFor(operand_index); + range->AddUseInterval(start, end, allocation_zone()); + iterator.Advance(); } +} - // Try to merge the spilled operands and count the number of merged spilled - // operands. - DCHECK(first_op != nullptr); - auto first_op_spill = first_op->GetSpillRange(); - size_t num_merged = 1; - for (size_t i = 1; i < phi->operands().size(); i++) { - int op = phi->operands()[i]; - auto op_range = LiveRangeFor(op); - auto op_spill = op_range->GetSpillRange(); - if (op_spill != nullptr && - (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill))) { - num_merged++; + +Instruction* LiveRangeBuilder::GetLastInstruction( + const InstructionBlock* block) { + return code()->InstructionAt(block->last_instruction_index()); +} + + +int LiveRangeBuilder::FixedDoubleLiveRangeID(int index) { + return -index - 1 - config()->num_general_registers(); +} + + +InstructionOperand* LiveRangeBuilder::AllocateFixed(UnallocatedOperand* operand, + int pos, bool is_tagged) { + TRACE("Allocating fixed reg for op %d\n", operand->virtual_register()); + DCHECK(operand->HasFixedPolicy()); + InstructionOperand allocated; + if (operand->HasFixedSlotPolicy()) { + allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT, + operand->fixed_slot_index()); + } else if (operand->HasFixedRegisterPolicy()) { + allocated = AllocatedOperand(AllocatedOperand::REGISTER, + operand->fixed_register_index()); + } else if (operand->HasFixedDoubleRegisterPolicy()) { + allocated = AllocatedOperand(AllocatedOperand::DOUBLE_REGISTER, + operand->fixed_register_index()); + } else { + UNREACHABLE(); + } + InstructionOperand::ReplaceWith(operand, &allocated); + if (is_tagged) { + TRACE("Fixed reg is tagged at %d\n", pos); + auto instr = InstructionAt(pos); + if (instr->HasReferenceMap()) { + instr->reference_map()->RecordReference(*operand); } } + return operand; +} - // Only continue if enough operands could be merged to the - // same spill slot. 
- if (num_merged * 2 <= phi->operands().size() || - AreUseIntervalsIntersecting(first_op_spill->interval(), - range->first_interval())) { - return false; + +LiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) { + DCHECK(index < config()->num_general_registers()); + auto result = fixed_live_ranges()[index]; + if (result == nullptr) { + result = data()->NewLiveRange(FixedLiveRangeID(index)); + DCHECK(result->IsFixed()); + result->set_kind(GENERAL_REGISTERS); + data()->SetLiveRangeAssignedRegister(result, index); + fixed_live_ranges()[index] = result; } + return result; +} - // If the range does not need register soon, spill it to the merged - // spill range. - auto next_pos = range->Start(); - if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart(); - auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos); - if (pos == nullptr) { - auto spill_range = range->TopLevel()->HasSpillRange() - ? range->TopLevel()->GetSpillRange() - : AssignSpillRangeToLiveRange(range->TopLevel()); - CHECK(first_op_spill->TryMerge(spill_range)); - Spill(range); - return true; - } else if (pos->pos().Value() > range->Start().NextStart().Value()) { - auto spill_range = range->TopLevel()->HasSpillRange() - ? range->TopLevel()->GetSpillRange() - : AssignSpillRangeToLiveRange(range->TopLevel()); - CHECK(first_op_spill->TryMerge(spill_range)); - SpillBetween(range, range->Start(), pos->pos()); - DCHECK(UnhandledIsSorted()); - return true; + +LiveRange* LiveRangeBuilder::FixedDoubleLiveRangeFor(int index) { + DCHECK(index < config()->num_aliased_double_registers()); + auto result = fixed_double_live_ranges()[index]; + if (result == nullptr) { + result = data()->NewLiveRange(FixedDoubleLiveRangeID(index)); + DCHECK(result->IsFixed()); + result->set_kind(DOUBLE_REGISTERS); + data()->SetLiveRangeAssignedRegister(result, index); + fixed_double_live_ranges()[index] = result; } - return false; + return result; +} + + +LiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand) { + if (operand->IsUnallocated()) { + return LiveRangeFor(UnallocatedOperand::cast(operand)->virtual_register()); + } else if (operand->IsConstant()) { + return LiveRangeFor(ConstantOperand::cast(operand)->virtual_register()); + } else if (operand->IsRegister()) { + return FixedLiveRangeFor(RegisterOperand::cast(operand)->index()); + } else if (operand->IsDoubleRegister()) { + return FixedDoubleLiveRangeFor( + DoubleRegisterOperand::cast(operand)->index()); + } else { + return nullptr; + } +} + + +void LiveRangeBuilder::Define(LifetimePosition position, + InstructionOperand* operand, + InstructionOperand* hint) { + auto range = LiveRangeFor(operand); + if (range == nullptr) return; + + if (range->IsEmpty() || range->Start().Value() > position.Value()) { + // Can happen if there is a definition without use. 
+ range->AddUseInterval(position, position.NextStart(), allocation_zone()); + range->AddUsePosition(position.NextStart(), nullptr, nullptr, + allocation_zone()); + } else { + range->ShortenTo(position); + } + + if (operand->IsUnallocated()) { + auto unalloc_operand = UnallocatedOperand::cast(operand); + range->AddUsePosition(position, unalloc_operand, hint, allocation_zone()); + } +} + + +void LiveRangeBuilder::Use(LifetimePosition block_start, + LifetimePosition position, + InstructionOperand* operand, + InstructionOperand* hint) { + auto range = LiveRangeFor(operand); + if (range == nullptr) return; + if (operand->IsUnallocated()) { + UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand); + range->AddUsePosition(position, unalloc_operand, hint, allocation_zone()); + } + range->AddUseInterval(block_start, position, allocation_zone()); } -void RegisterAllocator::MeetRegisterConstraints(const InstructionBlock* block) { +void LiveRangeBuilder::MeetRegisterConstraints(const InstructionBlock* block) { int start = block->first_instruction_index(); int end = block->last_instruction_index(); DCHECK_NE(-1, start); @@ -1068,7 +981,7 @@ void RegisterAllocator::MeetRegisterConstraints(const InstructionBlock* block) { } -void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock( +void LiveRangeBuilder::MeetRegisterConstraintsForLastInstructionInBlock( const InstructionBlock* block) { int end = block->last_instruction_index(); auto last_instruction = InstructionAt(end); @@ -1084,7 +997,7 @@ void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock( // This value is produced on the stack, we never need to spill it. if (output->IsStackSlot()) { DCHECK(StackSlotOperand::cast(output)->index() < - frame_->GetSpillSlotCount()); + data()->frame()->GetSpillSlotCount()); range->SetSpillOperand(StackSlotOperand::cast(output)); range->SetSpillStartIndex(end); assigned = true; @@ -1097,7 +1010,7 @@ void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock( // Create an unconstrained operand for the same virtual register // and insert a gap move from the fixed output to the operand. UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg); - AddGapMove(gap_index, Instruction::START, *output, output_copy); + data()->AddGapMove(gap_index, Instruction::START, *output, output_copy); } } @@ -1106,7 +1019,7 @@ void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock( const InstructionBlock* successor = code()->InstructionBlockAt(succ); DCHECK(successor->PredecessorCount() == 1); int gap_index = successor->first_instruction_index(); - range->SpillAtDefinition(local_zone(), gap_index, output); + range->SpillAtDefinition(allocation_zone(), gap_index, output); range->SetSpillStartIndex(gap_index); } } @@ -1114,7 +1027,7 @@ void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock( } -void RegisterAllocator::MeetConstraintsAfter(int instr_index) { +void LiveRangeBuilder::MeetConstraintsAfter(int instr_index) { auto first = InstructionAt(instr_index); // Handle fixed temporaries. 
for (size_t i = 0; i < first->TempCount(); i++) { @@ -1137,31 +1050,32 @@ void RegisterAllocator::MeetConstraintsAfter(int instr_index) { if (first_output->HasFixedPolicy()) { int output_vreg = first_output->virtual_register(); UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg); - bool is_tagged = HasTaggedValue(output_vreg); + bool is_tagged = IsReference(output_vreg); AllocateFixed(first_output, instr_index, is_tagged); // This value is produced on the stack, we never need to spill it. if (first_output->IsStackSlot()) { DCHECK(StackSlotOperand::cast(first_output)->index() < - frame_->GetSpillSlotCount()); + data()->frame()->GetSpillSlotCount()); range->SetSpillOperand(StackSlotOperand::cast(first_output)); range->SetSpillStartIndex(instr_index + 1); assigned = true; } - AddGapMove(instr_index + 1, Instruction::START, *first_output, - output_copy); + data()->AddGapMove(instr_index + 1, Instruction::START, *first_output, + output_copy); } // Make sure we add a gap move for spilling (if we have not done // so already). if (!assigned) { - range->SpillAtDefinition(local_zone(), instr_index + 1, first_output); + range->SpillAtDefinition(allocation_zone(), instr_index + 1, + first_output); range->SetSpillStartIndex(instr_index + 1); } } } -void RegisterAllocator::MeetConstraintsBefore(int instr_index) { +void LiveRangeBuilder::MeetConstraintsBefore(int instr_index) { auto second = InstructionAt(instr_index); // Handle fixed input operands of second instruction. for (size_t i = 0; i < second->InputCount(); i++) { @@ -1171,9 +1085,9 @@ void RegisterAllocator::MeetConstraintsBefore(int instr_index) { if (cur_input->HasFixedPolicy()) { int input_vreg = cur_input->virtual_register(); UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg); - bool is_tagged = HasTaggedValue(input_vreg); + bool is_tagged = IsReference(input_vreg); AllocateFixed(cur_input, instr_index, is_tagged); - AddGapMove(instr_index, Instruction::END, input_copy, *cur_input); + data()->AddGapMove(instr_index, Instruction::END, input_copy, *cur_input); } } // Handle "output same as input" for second instruction. @@ -1189,12 +1103,12 @@ void RegisterAllocator::MeetConstraintsBefore(int instr_index) { int input_vreg = cur_input->virtual_register(); UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg); cur_input->set_virtual_register(second_output->virtual_register()); - AddGapMove(instr_index, Instruction::END, input_copy, *cur_input); - if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) { + data()->AddGapMove(instr_index, Instruction::END, input_copy, *cur_input); + if (IsReference(input_vreg) && !IsReference(output_vreg)) { if (second->HasReferenceMap()) { second->reference_map()->RecordReference(input_copy); } - } else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) { + } else if (!IsReference(input_vreg) && IsReference(output_vreg)) { // The input is assumed to immediately have a tagged representation, // before the pointer map can be used. I.e. 
the pointer map at the // instruction will include the output operand (whose value at the @@ -1206,7 +1120,7 @@ void RegisterAllocator::MeetConstraintsBefore(int instr_index) { } -bool RegisterAllocator::IsOutputRegisterOf(Instruction* instr, int index) { +bool LiveRangeBuilder::IsOutputRegisterOf(Instruction* instr, int index) { for (size_t i = 0; i < instr->OutputCount(); i++) { auto output = instr->OutputAt(i); if (output->IsRegister() && RegisterOperand::cast(output)->index() == index) @@ -1216,8 +1130,7 @@ bool RegisterAllocator::IsOutputRegisterOf(Instruction* instr, int index) { } -bool RegisterAllocator::IsOutputDoubleRegisterOf(Instruction* instr, - int index) { +bool LiveRangeBuilder::IsOutputDoubleRegisterOf(Instruction* instr, int index) { for (size_t i = 0; i < instr->OutputCount(); i++) { auto output = instr->OutputAt(i); if (output->IsDoubleRegister() && @@ -1228,8 +1141,8 @@ bool RegisterAllocator::IsOutputDoubleRegisterOf(Instruction* instr, } -void RegisterAllocator::ProcessInstructions(const InstructionBlock* block, - BitVector* live) { +void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block, + BitVector* live) { int block_start = block->first_instruction_index(); auto block_start_position = LifetimePosition::GapFromInstructionIndex(block_start); @@ -1261,7 +1174,7 @@ void RegisterAllocator::ProcessInstructions(const InstructionBlock* block, if (!IsOutputRegisterOf(instr, i)) { auto range = FixedLiveRangeFor(i); range->AddUseInterval(curr_position, curr_position.End(), - local_zone()); + allocation_zone()); } } } @@ -1271,7 +1184,7 @@ void RegisterAllocator::ProcessInstructions(const InstructionBlock* block, if (!IsOutputDoubleRegisterOf(instr, i)) { auto range = FixedDoubleLiveRangeFor(i); range->AddUseInterval(curr_position, curr_position.End(), - local_zone()); + allocation_zone()); } } } @@ -1362,11 +1275,12 @@ void RegisterAllocator::ProcessInstructions(const InstructionBlock* block, } -void RegisterAllocator::ResolvePhis(const InstructionBlock* block) { +void LiveRangeBuilder::ResolvePhis(const InstructionBlock* block) { for (auto phi : block->phis()) { int phi_vreg = phi->virtual_register(); - auto map_value = new (local_zone()) PhiMapValue(phi, block, local_zone()); - auto res = phi_map_.insert(std::make_pair(phi_vreg, map_value)); + auto map_value = new (allocation_zone()) + RegisterAllocationData::PhiMapValue(phi, block, allocation_zone()); + auto res = phi_map().insert(std::make_pair(phi_vreg, map_value)); DCHECK(res.second); USE(res); auto& output = phi->output(); @@ -1374,15 +1288,15 @@ void RegisterAllocator::ResolvePhis(const InstructionBlock* block) { InstructionBlock* cur_block = code()->InstructionBlockAt(block->predecessors()[i]); UnallocatedOperand input(UnallocatedOperand::ANY, phi->operands()[i]); - auto move = AddGapMove(cur_block->last_instruction_index(), - Instruction::END, input, output); + auto move = data()->AddGapMove(cur_block->last_instruction_index(), + Instruction::END, input, output); map_value->incoming_moves.push_back(move); DCHECK(!InstructionAt(cur_block->last_instruction_index()) ->HasReferenceMap()); } auto live_range = LiveRangeFor(phi_vreg); int gap_index = block->first_instruction_index(); - live_range->SpillAtDefinition(local_zone(), gap_index, &output); + live_range->SpillAtDefinition(allocation_zone(), gap_index, &output); live_range->SetSpillStartIndex(gap_index); // We use the phi-ness of some nodes in some later heuristics. 
live_range->set_is_phi(true); @@ -1391,25 +1305,14 @@ void RegisterAllocator::ResolvePhis(const InstructionBlock* block) { } -void RegisterAllocator::AssignPhiInput(LiveRange* range, - const InstructionOperand& assignment) { - DCHECK(range->is_phi()); - auto it = phi_map_.find(range->id()); - DCHECK(it != phi_map_.end()); - for (auto move : it->second->incoming_moves) { - move->set_destination(assignment); - } -} - - -void RegisterAllocator::MeetRegisterConstraints() { +void LiveRangeBuilder::MeetRegisterConstraints() { for (auto block : code()->instruction_blocks()) { MeetRegisterConstraints(block); } } -void RegisterAllocator::ResolvePhis() { +void LiveRangeBuilder::ResolvePhis() { // Process the blocks in reverse order. for (auto i = code()->instruction_blocks().rbegin(); i != code()->instruction_blocks().rend(); ++i) { @@ -1418,275 +1321,7 @@ -const InstructionBlock* RegisterAllocator::GetInstructionBlock( - LifetimePosition pos) { - return code()->GetInstructionBlock(pos.ToInstructionIndex()); -} - - -void RegisterAllocator::ConnectRanges() { - ZoneMap<std::pair<ParallelMove*, InstructionOperand>, InstructionOperand> - delayed_insertion_map(local_zone()); - for (auto first_range : live_ranges()) { - if (first_range == nullptr || first_range->IsChild()) continue; - for (auto second_range = first_range->next(); second_range != nullptr; - first_range = second_range, second_range = second_range->next()) { - auto pos = second_range->Start(); - // Add gap move if the two live ranges touch and there is no block - // boundary. - if (second_range->IsSpilled()) continue; - if (first_range->End().Value() != pos.Value()) continue; - if (IsBlockBoundary(pos) && - !CanEagerlyResolveControlFlow(GetInstructionBlock(pos))) { - continue; - } - auto prev_operand = first_range->GetAssignedOperand(); - auto cur_operand = second_range->GetAssignedOperand(); - if (prev_operand == cur_operand) continue; - bool delay_insertion = false; - Instruction::GapPosition gap_pos; - int gap_index = pos.ToInstructionIndex(); - if (pos.IsGapPosition()) { - gap_pos = pos.IsStart() ? Instruction::START : Instruction::END; - } else { - if (pos.IsStart()) { - delay_insertion = true; - } else { - gap_index++; - } - gap_pos = delay_insertion ? Instruction::END : Instruction::START; - } - auto move = code()->InstructionAt(gap_index)->GetOrCreateParallelMove( - gap_pos, code_zone()); - if (!delay_insertion) { - move->AddMove(prev_operand, cur_operand); - } else { - delayed_insertion_map.insert( - std::make_pair(std::make_pair(move, prev_operand), cur_operand)); - } - } - } - if (delayed_insertion_map.empty()) return; - // Insert all the moves which should occur after the stored move. - ZoneVector<MoveOperands*> to_insert(local_zone()); - ZoneVector<MoveOperands*> to_eliminate(local_zone()); - to_insert.reserve(4); - to_eliminate.reserve(4); - auto moves = delayed_insertion_map.begin()->first.first; - for (auto it = delayed_insertion_map.begin();; ++it) { - bool done = it == delayed_insertion_map.end(); - if (done || it->first.first != moves) { - // Commit the MoveOperands for current ParallelMove. - for (auto move : to_eliminate) { - move->Eliminate(); - } - for (auto move : to_insert) { - moves->push_back(move); - } - if (done) break; - // Reset state. - to_eliminate.clear(); - to_insert.clear(); - moves = it->first.first; - } - // Gather all MoveOperands for a single ParallelMove.
- auto move = new (code_zone()) MoveOperands(it->first.second, it->second); - auto eliminate = moves->PrepareInsertAfter(move); - to_insert.push_back(move); - if (eliminate != nullptr) to_eliminate.push_back(eliminate); - } -} - - -bool RegisterAllocator::CanEagerlyResolveControlFlow( - const InstructionBlock* block) const { - if (block->PredecessorCount() != 1) return false; - return block->predecessors()[0].IsNext(block->rpo_number()); -} - - -namespace { - -class LiveRangeBound { - public: - explicit LiveRangeBound(const LiveRange* range) - : range_(range), start_(range->Start()), end_(range->End()) { - DCHECK(!range->IsEmpty()); - } - - bool CanCover(LifetimePosition position) { - return start_.Value() <= position.Value() && - position.Value() < end_.Value(); - } - - const LiveRange* const range_; - const LifetimePosition start_; - const LifetimePosition end_; - - private: - DISALLOW_COPY_AND_ASSIGN(LiveRangeBound); -}; - - -struct FindResult { - const LiveRange* cur_cover_; - const LiveRange* pred_cover_; -}; - - -class LiveRangeBoundArray { - public: - LiveRangeBoundArray() : length_(0), start_(nullptr) {} - - bool ShouldInitialize() { return start_ == nullptr; } - - void Initialize(Zone* zone, const LiveRange* const range) { - size_t length = 0; - for (auto i = range; i != nullptr; i = i->next()) length++; - start_ = zone->NewArray<LiveRangeBound>(length); - length_ = length; - auto curr = start_; - for (auto i = range; i != nullptr; i = i->next(), ++curr) { - new (curr) LiveRangeBound(i); - } - } - - LiveRangeBound* Find(const LifetimePosition position) const { - size_t left_index = 0; - size_t right_index = length_; - while (true) { - size_t current_index = left_index + (right_index - left_index) / 2; - DCHECK(right_index > current_index); - auto bound = &start_[current_index]; - if (bound->start_.Value() <= position.Value()) { - if (position.Value() < bound->end_.Value()) return bound; - DCHECK(left_index < current_index); - left_index = current_index; - } else { - right_index = current_index; - } - } - } - - LiveRangeBound* FindPred(const InstructionBlock* pred) { - auto pred_end = LifetimePosition::InstructionFromInstructionIndex( - pred->last_instruction_index()); - return Find(pred_end); - } - - LiveRangeBound* FindSucc(const InstructionBlock* succ) { - auto succ_start = LifetimePosition::GapFromInstructionIndex( - succ->first_instruction_index()); - return Find(succ_start); - } - - void Find(const InstructionBlock* block, const InstructionBlock* pred, - FindResult* result) const { - auto pred_end = LifetimePosition::InstructionFromInstructionIndex( - pred->last_instruction_index()); - auto bound = Find(pred_end); - result->pred_cover_ = bound->range_; - auto cur_start = LifetimePosition::GapFromInstructionIndex( - block->first_instruction_index()); - // Common case.
- if (bound->CanCover(cur_start)) { - result->cur_cover_ = bound->range_; - return; - } - result->cur_cover_ = Find(cur_start)->range_; - DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr); - } - - private: - size_t length_; - LiveRangeBound* start_; - - DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray); -}; - - -class LiveRangeFinder { - public: - explicit LiveRangeFinder(const RegisterAllocator& allocator) - : allocator_(allocator), - bounds_length_(static_cast<int>(allocator.live_ranges().size())), - bounds_(allocator.local_zone()->NewArray<LiveRangeBoundArray>( - bounds_length_)) { - for (int i = 0; i < bounds_length_; ++i) { - new (&bounds_[i]) LiveRangeBoundArray(); - } - } - - LiveRangeBoundArray* ArrayFor(int operand_index) { - DCHECK(operand_index < bounds_length_); - auto range = allocator_.live_ranges()[operand_index]; - DCHECK(range != nullptr && !range->IsEmpty()); - auto array = &bounds_[operand_index]; - if (array->ShouldInitialize()) { - array->Initialize(allocator_.local_zone(), range); - } - return array; - } - - private: - const RegisterAllocator& allocator_; - const int bounds_length_; - LiveRangeBoundArray* const bounds_; - - DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder); -}; - -} // namespace - - -void RegisterAllocator::ResolveControlFlow() { - // Lazily linearize live ranges in memory for fast lookup. - LiveRangeFinder finder(*this); - for (auto block : code()->instruction_blocks()) { - if (CanEagerlyResolveControlFlow(block)) continue; - auto live = live_in_sets_[block->rpo_number().ToInt()]; - BitVector::Iterator iterator(live); - while (!iterator.Done()) { - auto* array = finder.ArrayFor(iterator.Current()); - for (auto pred : block->predecessors()) { - FindResult result; - const auto* pred_block = code()->InstructionBlockAt(pred); - array->Find(block, pred_block, &result); - if (result.cur_cover_ == result.pred_cover_ || - result.cur_cover_->IsSpilled()) - continue; - auto pred_op = result.pred_cover_->GetAssignedOperand(); - auto cur_op = result.cur_cover_->GetAssignedOperand(); - if (pred_op == cur_op) continue; - ResolveControlFlow(block, cur_op, pred_block, pred_op); - } - iterator.Advance(); - } - } -} - - -void RegisterAllocator::ResolveControlFlow(const InstructionBlock* block, - const InstructionOperand& cur_op, - const InstructionBlock* pred, - const InstructionOperand& pred_op) { - DCHECK(pred_op != cur_op); - int gap_index; - Instruction::GapPosition position; - if (block->PredecessorCount() == 1) { - gap_index = block->first_instruction_index(); - position = Instruction::START; - } else { - DCHECK(pred->SuccessorCount() == 1); - DCHECK(!InstructionAt(pred->last_instruction_index())->HasReferenceMap()); - gap_index = pred->last_instruction_index(); - position = Instruction::END; - } - AddGapMove(gap_index, position, pred_op, cur_op); -} - - -void RegisterAllocator::BuildLiveRanges() { +void LiveRangeBuilder::BuildLiveRanges() { // Process the blocks in reverse order. for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0; --block_id) { @@ -1724,7 +1359,7 @@ void RegisterAllocator::BuildLiveRanges() { // Now live is live_in for this block except not including values live // out on backward successor edges.
- live_in_sets_[block_id] = live; + live_in_sets()[block_id] = live; if (block->IsLoopHeader()) { // Add a live range stretching from the first loop instruction to the last @@ -1737,23 +1372,23 @@ void RegisterAllocator::BuildLiveRanges() { while (!iterator.Done()) { int operand_index = iterator.Current(); auto range = LiveRangeFor(operand_index); - range->EnsureInterval(start, end, local_zone()); + range->EnsureInterval(start, end, allocation_zone()); iterator.Advance(); } // Insert all values into the live in sets of all blocks in the loop. for (int i = block->rpo_number().ToInt() + 1; i < block->loop_end().ToInt(); ++i) { - live_in_sets_[i]->Union(*live); + live_in_sets()[i]->Union(*live); } } } for (auto range : live_ranges()) { if (range == nullptr) continue; - range->kind_ = RequiredRegisterKind(range->id()); + range->set_kind(RequiredRegisterKind(range->id())); // Give slots to all ranges with a non fixed slot use. if (range->has_slot_use() && range->HasNoSpillType()) { - AssignSpillRangeToLiveRange(range); + data()->AssignSpillRangeToLiveRange(range); } // TODO(bmeurer): This is a horrible hack to make sure that for constant // live ranges, every use requires the constant to be in a register. @@ -1771,136 +1406,49 @@ void RegisterAllocator::BuildLiveRanges() { } } } + +#ifdef DEBUG + Verify(); +#endif } -bool RegisterAllocator::ExistsUseWithoutDefinition() { - bool found = false; - BitVector::Iterator iterator(live_in_sets_[0]); - while (!iterator.Done()) { - found = true; - int operand_index = iterator.Current(); - PrintF("Register allocator error: live v%d reached first block.\n", - operand_index); - LiveRange* range = LiveRangeFor(operand_index); - PrintF(" (first use is at %d)\n", range->first_pos()->pos().Value()); - if (debug_name() == nullptr) { - PrintF("\n"); - } else { - PrintF(" (function: %s)\n", debug_name()); - } - iterator.Advance(); - } - return found; -} - - -bool RegisterAllocator::SafePointsAreInOrder() const { - int safe_point = 0; - for (auto map : *code()->reference_maps()) { - if (safe_point > map->instruction_position()) return false; - safe_point = map->instruction_position(); - } - return true; +RegisterKind LiveRangeBuilder::RequiredRegisterKind( + int virtual_register) const { + return (code()->IsDouble(virtual_register)) ? DOUBLE_REGISTERS + : GENERAL_REGISTERS; } -void RegisterAllocator::PopulateReferenceMaps() { - DCHECK(SafePointsAreInOrder()); - - // Iterate over all safe point positions and record a pointer - // for all spilled live ranges at this point. - int last_range_start = 0; - auto reference_maps = code()->reference_maps(); - ReferenceMapDeque::const_iterator first_it = reference_maps->begin(); - for (LiveRange* range : live_ranges()) { - if (range == nullptr) continue; - // Iterate over the first parts of multi-part live ranges. - if (range->IsChild()) continue; - // Skip non-reference values. - if (!HasTaggedValue(range->id())) continue; - // Skip empty live ranges. - if (range->IsEmpty()) continue; - - // Find the extent of the range and its children. - int start = range->Start().ToInstructionIndex(); - int end = 0; - for (auto cur = range; cur != nullptr; cur = cur->next()) { - auto this_end = cur->End(); - if (this_end.ToInstructionIndex() > end) - end = this_end.ToInstructionIndex(); - DCHECK(cur->Start().ToInstructionIndex() >= start); - } - - // Most of the ranges are in order, but not all. Keep an eye on when they - // step backwards and reset the first_it so we don't miss any safe points. 
-    if (start < last_range_start) first_it = reference_maps->begin();
-    last_range_start = start;
-
-    // Step across all the safe points that are before the start of this range,
-    // recording how far we step in order to save doing this for the next range.
-    for (; first_it != reference_maps->end(); ++first_it) {
-      auto map = *first_it;
-      if (map->instruction_position() >= start) break;
-    }
-
-    // Step through the safe points to see whether they are in the range.
-    for (auto it = first_it; it != reference_maps->end(); ++it) {
-      auto map = *it;
-      int safe_point = map->instruction_position();
-
-      // The safe points are sorted so we can stop searching here.
-      if (safe_point - 1 > end) break;
-
-      // Advance to the next active range that covers the current
-      // safe point position.
-      auto safe_point_pos =
-          LifetimePosition::InstructionFromInstructionIndex(safe_point);
-      auto cur = range;
-      while (cur != nullptr && !cur->Covers(safe_point_pos)) {
-        cur = cur->next();
-      }
-      if (cur == nullptr) continue;
-
-      // Check if the live range is spilled and the safe point is after
-      // the spill position.
-      if (range->HasSpillOperand() &&
-          safe_point >= range->spill_start_index() &&
-          !range->GetSpillOperand()->IsConstant()) {
-        TRACE("Pointer for range %d (spilled at %d) at safe point %d\n",
-              range->id(), range->spill_start_index(), safe_point);
-        map->RecordReference(*range->GetSpillOperand());
-      }
-
-      if (!cur->IsSpilled()) {
-        TRACE(
-            "Pointer in register for range %d (start at %d) "
-            "at safe point %d\n",
-            cur->id(), cur->Start().Value(), safe_point);
-        auto operand = cur->GetAssignedOperand();
-        DCHECK(!operand.IsStackSlot());
-        map->RecordReference(operand);
-      }
-    }
+void LiveRangeBuilder::Verify() const {
+  for (auto current : data()->live_ranges()) {
+    if (current != nullptr) current->Verify();
   }
 }
 
 
-void RegisterAllocator::AllocateGeneralRegisters() {
-  num_registers_ = config()->num_general_registers();
-  mode_ = GENERAL_REGISTERS;
-  AllocateRegisters();
-}
-
-
-void RegisterAllocator::AllocateDoubleRegisters() {
-  num_registers_ = config()->num_aliased_double_registers();
-  mode_ = DOUBLE_REGISTERS;
-  AllocateRegisters();
+LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
+                                         RegisterKind kind)
+    : data_(data),
+      mode_(kind),
+      num_registers_(kind == DOUBLE_REGISTERS
+                         ? config()->num_aliased_double_registers()
+                         : config()->num_general_registers()),
+      unhandled_live_ranges_(allocation_zone()),
+      active_live_ranges_(allocation_zone()),
+      inactive_live_ranges_(allocation_zone()) {
+  unhandled_live_ranges().reserve(
+      static_cast<size_t>(code()->VirtualRegisterCount() * 2));
+  active_live_ranges().reserve(8);
+  inactive_live_ranges().reserve(8);
+  // TryAllocateFreeReg and AllocateBlockedReg assume this
+  // when allocating local arrays.
+ DCHECK(RegisterConfiguration::kMaxDoubleRegisters >= + config()->num_general_registers()); } -void RegisterAllocator::AllocateRegisters() { +void LinearScanAllocator::AllocateRegisters() { DCHECK(unhandled_live_ranges().empty()); for (auto range : live_ranges()) { @@ -1916,18 +1464,13 @@ void RegisterAllocator::AllocateRegisters() { DCHECK(inactive_live_ranges().empty()); if (mode_ == DOUBLE_REGISTERS) { - for (int i = 0; i < config()->num_aliased_double_registers(); ++i) { - auto current = fixed_double_live_ranges()[i]; - if (current != nullptr) { - AddToInactive(current); - } + for (auto current : fixed_double_live_ranges()) { + if (current != nullptr) AddToInactive(current); } } else { DCHECK(mode_ == GENERAL_REGISTERS); for (auto current : fixed_live_ranges()) { - if (current != nullptr) { - AddToInactive(current); - } + if (current != nullptr) AddToInactive(current); } } @@ -2001,7 +1544,7 @@ void RegisterAllocator::AllocateRegisters() { } -const char* RegisterAllocator::RegisterName(int allocation_index) { +const char* LinearScanAllocator::RegisterName(int allocation_index) { if (mode_ == GENERAL_REGISTERS) { return config()->general_register_name(allocation_index); } else { @@ -2010,31 +1553,19 @@ const char* RegisterAllocator::RegisterName(int allocation_index) { } -bool RegisterAllocator::HasTaggedValue(int virtual_register) const { - return code()->IsReference(virtual_register); -} - - -RegisterKind RegisterAllocator::RequiredRegisterKind( - int virtual_register) const { - return (code()->IsDouble(virtual_register)) ? DOUBLE_REGISTERS - : GENERAL_REGISTERS; -} - - -void RegisterAllocator::AddToActive(LiveRange* range) { +void LinearScanAllocator::AddToActive(LiveRange* range) { TRACE("Add live range %d to active\n", range->id()); active_live_ranges().push_back(range); } -void RegisterAllocator::AddToInactive(LiveRange* range) { +void LinearScanAllocator::AddToInactive(LiveRange* range) { TRACE("Add live range %d to inactive\n", range->id()); inactive_live_ranges().push_back(range); } -void RegisterAllocator::AddToUnhandledSorted(LiveRange* range) { +void LinearScanAllocator::AddToUnhandledSorted(LiveRange* range) { if (range == nullptr || range->IsEmpty()) return; DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled()); DCHECK(allocation_finger_.Value() <= range->Start().Value()); @@ -2054,7 +1585,7 @@ void RegisterAllocator::AddToUnhandledSorted(LiveRange* range) { } -void RegisterAllocator::AddToUnhandledUnsorted(LiveRange* range) { +void LinearScanAllocator::AddToUnhandledUnsorted(LiveRange* range) { if (range == nullptr || range->IsEmpty()) return; DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled()); TRACE("Add live range %d to unhandled unsorted at end\n", range->id()); @@ -2073,14 +1604,14 @@ static bool UnhandledSortHelper(LiveRange* a, LiveRange* b) { // Sort the unhandled live ranges so that the ranges to be processed first are // at the end of the array list. This is convenient for the register allocation // algorithm because it is efficient to remove elements from the end. 
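The helper's body falls outside the hunks shown here. For orientation, a minimal comparator consistent with the comment above would order ranges by descending start position, so the range to be processed next sits at the back of the vector; this is an illustrative sketch only, and the real UnhandledSortHelper may break ties differently:

    static bool UnhandledSortHelper(LiveRange* a, LiveRange* b) {
      // Descending start order: the smallest start ends up last, so the
      // allocator can fetch the next range with back() + pop_back() in O(1).
      return a->Start().Value() > b->Start().Value();
    }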
-void RegisterAllocator::SortUnhandled() { +void LinearScanAllocator::SortUnhandled() { TRACE("Sort unhandled\n"); std::sort(unhandled_live_ranges().begin(), unhandled_live_ranges().end(), &UnhandledSortHelper); } -bool RegisterAllocator::UnhandledIsSorted() { +bool LinearScanAllocator::UnhandledIsSorted() { size_t len = unhandled_live_ranges().size(); for (size_t i = 1; i < len; i++) { auto a = unhandled_live_ranges().at(i - 1); @@ -2091,33 +1622,33 @@ bool RegisterAllocator::UnhandledIsSorted() { } -void RegisterAllocator::ActiveToHandled(LiveRange* range) { +void LinearScanAllocator::ActiveToHandled(LiveRange* range) { RemoveElement(&active_live_ranges(), range); TRACE("Moving live range %d from active to handled\n", range->id()); } -void RegisterAllocator::ActiveToInactive(LiveRange* range) { +void LinearScanAllocator::ActiveToInactive(LiveRange* range) { RemoveElement(&active_live_ranges(), range); inactive_live_ranges().push_back(range); TRACE("Moving live range %d from active to inactive\n", range->id()); } -void RegisterAllocator::InactiveToHandled(LiveRange* range) { +void LinearScanAllocator::InactiveToHandled(LiveRange* range) { RemoveElement(&inactive_live_ranges(), range); TRACE("Moving live range %d from inactive to handled\n", range->id()); } -void RegisterAllocator::InactiveToActive(LiveRange* range) { +void LinearScanAllocator::InactiveToActive(LiveRange* range) { RemoveElement(&inactive_live_ranges(), range); active_live_ranges().push_back(range); TRACE("Moving live range %d from inactive to active\n", range->id()); } -bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) { +bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) { LifetimePosition free_until_pos[RegisterConfiguration::kMaxDoubleRegisters]; for (int i = 0; i < num_registers_; i++) { @@ -2186,7 +1717,7 @@ bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) { } -void RegisterAllocator::AllocateBlockedReg(LiveRange* current) { +void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) { auto register_use = current->NextRegisterPosition(current->Start()); if (register_use == nullptr) { // There is no use in the current live range that requires a register. @@ -2276,7 +1807,7 @@ static const InstructionBlock* GetContainingLoop( } -LifetimePosition RegisterAllocator::FindOptimalSpillingPos( +LifetimePosition LinearScanAllocator::FindOptimalSpillingPos( LiveRange* range, LifetimePosition pos) { auto block = GetInstructionBlock(pos.Start()); auto loop_header = @@ -2308,7 +1839,7 @@ LifetimePosition RegisterAllocator::FindOptimalSpillingPos( } -void RegisterAllocator::SplitAndSpillIntersecting(LiveRange* current) { +void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) { DCHECK(current->HasRegisterAssigned()); int reg = current->assigned_register(); auto split_pos = current->Start(); @@ -2356,15 +1887,94 @@ void RegisterAllocator::SplitAndSpillIntersecting(LiveRange* current) { } -bool RegisterAllocator::IsBlockBoundary(LifetimePosition pos) { - return pos.IsFullStart() && - code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() == - pos.ToInstructionIndex(); +bool LinearScanAllocator::TryReuseSpillForPhi(LiveRange* range) { + if (range->IsChild() || !range->is_phi()) return false; + DCHECK(!range->HasSpillOperand()); + + auto lookup = phi_map().find(range->id()); + DCHECK(lookup != phi_map().end()); + auto phi = lookup->second->phi; + auto block = lookup->second->block; + // Count the number of spilled operands. 
+ size_t spilled_count = 0; + LiveRange* first_op = nullptr; + for (size_t i = 0; i < phi->operands().size(); i++) { + int op = phi->operands()[i]; + LiveRange* op_range = LiveRangeFor(op); + if (op_range->GetSpillRange() == nullptr) continue; + auto pred = code()->InstructionBlockAt(block->predecessors()[i]); + auto pred_end = LifetimePosition::InstructionFromInstructionIndex( + pred->last_instruction_index()); + while (op_range != nullptr && !op_range->CanCover(pred_end)) { + op_range = op_range->next(); + } + if (op_range != nullptr && op_range->IsSpilled()) { + spilled_count++; + if (first_op == nullptr) { + first_op = op_range->TopLevel(); + } + } + } + + // Only continue if more than half of the operands are spilled. + if (spilled_count * 2 <= phi->operands().size()) { + return false; + } + + // Try to merge the spilled operands and count the number of merged spilled + // operands. + DCHECK(first_op != nullptr); + auto first_op_spill = first_op->GetSpillRange(); + size_t num_merged = 1; + for (size_t i = 1; i < phi->operands().size(); i++) { + int op = phi->operands()[i]; + auto op_range = LiveRangeFor(op); + auto op_spill = op_range->GetSpillRange(); + if (op_spill != nullptr && + (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill))) { + num_merged++; + } + } + + // Only continue if enough operands could be merged to the + // same spill slot. + if (num_merged * 2 <= phi->operands().size() || + AreUseIntervalsIntersecting(first_op_spill->interval(), + range->first_interval())) { + return false; + } + + // If the range does not need register soon, spill it to the merged + // spill range. + auto next_pos = range->Start(); + if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart(); + auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos); + if (pos == nullptr) { + auto spill_range = + range->TopLevel()->HasSpillRange() + ? range->TopLevel()->GetSpillRange() + : data()->AssignSpillRangeToLiveRange(range->TopLevel()); + bool merged = first_op_spill->TryMerge(spill_range); + CHECK(merged); + Spill(range); + return true; + } else if (pos->pos().Value() > range->Start().NextStart().Value()) { + auto spill_range = + range->TopLevel()->HasSpillRange() + ? 
range->TopLevel()->GetSpillRange() + : data()->AssignSpillRangeToLiveRange(range->TopLevel()); + bool merged = first_op_spill->TryMerge(spill_range); + CHECK(merged); + SpillBetween(range, range->Start(), pos->pos()); + DCHECK(UnhandledIsSorted()); + return true; + } + return false; } -LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range, - LifetimePosition pos) { +LiveRange* LinearScanAllocator::SplitRangeAt(LiveRange* range, + LifetimePosition pos) { DCHECK(!range->IsFixed()); TRACE("Splitting live range %d at %d\n", range->id(), pos.Value()); @@ -2378,14 +1988,14 @@ LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range, int vreg = GetVirtualRegister(); auto result = LiveRangeFor(vreg); - range->SplitAt(pos, result, local_zone()); + range->SplitAt(pos, result, allocation_zone()); return result; } -LiveRange* RegisterAllocator::SplitBetween(LiveRange* range, - LifetimePosition start, - LifetimePosition end) { +LiveRange* LinearScanAllocator::SplitBetween(LiveRange* range, + LifetimePosition start, + LifetimePosition end) { DCHECK(!range->IsFixed()); TRACE("Splitting live range %d in position between [%d, %d]\n", range->id(), start.Value(), end.Value()); @@ -2396,8 +2006,8 @@ LiveRange* RegisterAllocator::SplitBetween(LiveRange* range, } -LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start, - LifetimePosition end) { +LifetimePosition LinearScanAllocator::FindOptimalSplitPos( + LifetimePosition start, LifetimePosition end) { int start_instr = start.ToInstructionIndex(); int end_instr = end.ToInstructionIndex(); DCHECK(start_instr <= end_instr); @@ -2432,22 +2042,22 @@ LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start, } -void RegisterAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) { +void LinearScanAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) { auto second_part = SplitRangeAt(range, pos); Spill(second_part); } -void RegisterAllocator::SpillBetween(LiveRange* range, LifetimePosition start, - LifetimePosition end) { +void LinearScanAllocator::SpillBetween(LiveRange* range, LifetimePosition start, + LifetimePosition end) { SpillBetweenUntil(range, start, start, end); } -void RegisterAllocator::SpillBetweenUntil(LiveRange* range, - LifetimePosition start, - LifetimePosition until, - LifetimePosition end) { +void LinearScanAllocator::SpillBetweenUntil(LiveRange* range, + LifetimePosition start, + LifetimePosition until, + LifetimePosition end) { CHECK(start.Value() < end.Value()); auto second_part = SplitRangeAt(range, start); @@ -2456,7 +2066,7 @@ void RegisterAllocator::SpillBetweenUntil(LiveRange* range, // Split it at position between ]start+1, end[, spill the middle part // and put the rest to unhandled. 
auto third_part_end = end.PrevStart().End(); - if (IsBlockBoundary(end.Start())) { + if (data()->IsBlockBoundary(end.Start())) { third_part_end = end.Start(); } auto third_part = SplitBetween( @@ -2474,46 +2084,427 @@ void RegisterAllocator::SpillBetweenUntil(LiveRange* range, } -void RegisterAllocator::Spill(LiveRange* range) { +void LinearScanAllocator::Spill(LiveRange* range) { DCHECK(!range->IsSpilled()); TRACE("Spilling live range %d\n", range->id()); auto first = range->TopLevel(); if (first->HasNoSpillType()) { - AssignSpillRangeToLiveRange(first); + data()->AssignSpillRangeToLiveRange(first); } range->MakeSpilled(); } -int RegisterAllocator::RegisterCount() const { return num_registers_; } +OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {} -#ifdef DEBUG +void OperandAssigner::AssignSpillSlots() { + auto& spill_ranges = data()->spill_ranges(); + // Merge disjoint spill ranges + for (size_t i = 0; i < spill_ranges.size(); i++) { + auto range = spill_ranges[i]; + if (range->IsEmpty()) continue; + for (size_t j = i + 1; j < spill_ranges.size(); j++) { + auto other = spill_ranges[j]; + if (!other->IsEmpty()) { + range->TryMerge(other); + } + } + } + // Allocate slots for the merged spill ranges. + for (auto range : spill_ranges) { + if (range->IsEmpty()) continue; + // Allocate a new operand referring to the spill slot. + auto kind = range->Kind(); + int index = data()->frame()->AllocateSpillSlot(kind == DOUBLE_REGISTERS); + auto op_kind = kind == DOUBLE_REGISTERS + ? AllocatedOperand::DOUBLE_STACK_SLOT + : AllocatedOperand::STACK_SLOT; + auto op = AllocatedOperand::New(data()->code_zone(), op_kind, index); + range->SetOperand(op); + } +} -void RegisterAllocator::Verify() const { - for (auto current : live_ranges()) { - if (current != nullptr) current->Verify(); +void OperandAssigner::CommitAssignment() { + for (auto range : data()->live_ranges()) { + if (range == nullptr || range->IsEmpty()) continue; + InstructionOperand* spill_operand = nullptr; + if (!range->TopLevel()->HasNoSpillType()) { + spill_operand = range->TopLevel()->GetSpillOperand(); + } + auto assigned = range->GetAssignedOperand(); + range->ConvertUsesToOperand(assigned, spill_operand); + if (range->is_phi()) data()->AssignPhiInput(range, assigned); + if (!range->IsChild() && spill_operand != nullptr) { + range->CommitSpillsAtDefinition(data()->code(), spill_operand, + range->has_slot_use()); + } } } -#endif +ReferenceMapPopulator::ReferenceMapPopulator(RegisterAllocationData* data) + : data_(data) {} -void RegisterAllocator::SetLiveRangeAssignedRegister(LiveRange* range, - int reg) { - if (range->Kind() == DOUBLE_REGISTERS) { - assigned_double_registers_->Add(reg); - } else { - DCHECK(range->Kind() == GENERAL_REGISTERS); - assigned_registers_->Add(reg); +bool ReferenceMapPopulator::SafePointsAreInOrder() const { + int safe_point = 0; + for (auto map : *data()->code()->reference_maps()) { + if (safe_point > map->instruction_position()) return false; + safe_point = map->instruction_position(); + } + return true; +} + + +void ReferenceMapPopulator::PopulateReferenceMaps() { + DCHECK(SafePointsAreInOrder()); + + // Iterate over all safe point positions and record a pointer + // for all spilled live ranges at this point. 
+ int last_range_start = 0; + auto reference_maps = data()->code()->reference_maps(); + ReferenceMapDeque::const_iterator first_it = reference_maps->begin(); + for (LiveRange* range : data()->live_ranges()) { + if (range == nullptr) continue; + // Iterate over the first parts of multi-part live ranges. + if (range->IsChild()) continue; + // Skip non-reference values. + if (!IsReference(range->id())) continue; + // Skip empty live ranges. + if (range->IsEmpty()) continue; + + // Find the extent of the range and its children. + int start = range->Start().ToInstructionIndex(); + int end = 0; + for (auto cur = range; cur != nullptr; cur = cur->next()) { + auto this_end = cur->End(); + if (this_end.ToInstructionIndex() > end) + end = this_end.ToInstructionIndex(); + DCHECK(cur->Start().ToInstructionIndex() >= start); + } + + // Most of the ranges are in order, but not all. Keep an eye on when they + // step backwards and reset the first_it so we don't miss any safe points. + if (start < last_range_start) first_it = reference_maps->begin(); + last_range_start = start; + + // Step across all the safe points that are before the start of this range, + // recording how far we step in order to save doing this for the next range. + for (; first_it != reference_maps->end(); ++first_it) { + auto map = *first_it; + if (map->instruction_position() >= start) break; + } + + // Step through the safe points to see whether they are in the range. + for (auto it = first_it; it != reference_maps->end(); ++it) { + auto map = *it; + int safe_point = map->instruction_position(); + + // The safe points are sorted so we can stop searching here. + if (safe_point - 1 > end) break; + + // Advance to the next active range that covers the current + // safe point position. + auto safe_point_pos = + LifetimePosition::InstructionFromInstructionIndex(safe_point); + auto cur = range; + while (cur != nullptr && !cur->Covers(safe_point_pos)) { + cur = cur->next(); + } + if (cur == nullptr) continue; + + // Check if the live range is spilled and the safe point is after + // the spill position. 
+      if (range->HasSpillOperand() &&
+          safe_point >= range->spill_start_index() &&
+          !range->GetSpillOperand()->IsConstant()) {
+        TRACE("Pointer for range %d (spilled at %d) at safe point %d\n",
+              range->id(), range->spill_start_index(), safe_point);
+        map->RecordReference(*range->GetSpillOperand());
+      }
+
+      if (!cur->IsSpilled()) {
+        TRACE(
+            "Pointer in register for range %d (start at %d) "
+            "at safe point %d\n",
+            cur->id(), cur->Start().Value(), safe_point);
+        auto operand = cur->GetAssignedOperand();
+        DCHECK(!operand.IsStackSlot());
+        map->RecordReference(operand);
+      }
+    }
+  }
+}
+
+
+namespace {
+
+class LiveRangeBound {
+ public:
+  explicit LiveRangeBound(const LiveRange* range)
+      : range_(range), start_(range->Start()), end_(range->End()) {
+    DCHECK(!range->IsEmpty());
+  }
+
+  bool CanCover(LifetimePosition position) {
+    return start_.Value() <= position.Value() &&
+           position.Value() < end_.Value();
+  }
+
+  const LiveRange* const range_;
+  const LifetimePosition start_;
+  const LifetimePosition end_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
+};
+
+
+struct FindResult {
+  const LiveRange* cur_cover_;
+  const LiveRange* pred_cover_;
+};
+
+
+class LiveRangeBoundArray {
+ public:
+  LiveRangeBoundArray() : length_(0), start_(nullptr) {}
+
+  bool ShouldInitialize() { return start_ == nullptr; }
+
+  void Initialize(Zone* zone, const LiveRange* const range) {
+    size_t length = 0;
+    for (auto i = range; i != nullptr; i = i->next()) length++;
+    start_ = zone->NewArray<LiveRangeBound>(length);
+    length_ = length;
+    auto curr = start_;
+    for (auto i = range; i != nullptr; i = i->next(), ++curr) {
+      new (curr) LiveRangeBound(i);
+    }
+  }
+
+  LiveRangeBound* Find(const LifetimePosition position) const {
+    size_t left_index = 0;
+    size_t right_index = length_;
+    while (true) {
+      size_t current_index = left_index + (right_index - left_index) / 2;
+      DCHECK(right_index > current_index);
+      auto bound = &start_[current_index];
+      if (bound->start_.Value() <= position.Value()) {
+        if (position.Value() < bound->end_.Value()) return bound;
+        DCHECK(left_index < current_index);
+        left_index = current_index;
+      } else {
+        right_index = current_index;
+      }
+    }
+  }
+
+  LiveRangeBound* FindPred(const InstructionBlock* pred) {
+    auto pred_end = LifetimePosition::InstructionFromInstructionIndex(
+        pred->last_instruction_index());
+    return Find(pred_end);
+  }
+
+  LiveRangeBound* FindSucc(const InstructionBlock* succ) {
+    auto succ_start = LifetimePosition::GapFromInstructionIndex(
+        succ->first_instruction_index());
+    return Find(succ_start);
+  }
+
+  void Find(const InstructionBlock* block, const InstructionBlock* pred,
+            FindResult* result) const {
+    auto pred_end = LifetimePosition::InstructionFromInstructionIndex(
+        pred->last_instruction_index());
+    auto bound = Find(pred_end);
+    result->pred_cover_ = bound->range_;
+    auto cur_start = LifetimePosition::GapFromInstructionIndex(
+        block->first_instruction_index());
+    // Common case.
+    if (bound->CanCover(cur_start)) {
+      result->cur_cover_ = bound->range_;
+      return;
+    }
+    result->cur_cover_ = Find(cur_start)->range_;
+    DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
+  }
+
+ private:
+  size_t length_;
+  LiveRangeBound* start_;
+
+  DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
+};
+
+
+class LiveRangeFinder {
+ public:
+  explicit LiveRangeFinder(const RegisterAllocationData* data)
+      : data_(data),
+        bounds_length_(static_cast<int>(data_->live_ranges().size())),
+        bounds_(data_->allocation_zone()->NewArray<LiveRangeBoundArray>(
+            bounds_length_)) {
+    for (int i = 0; i < bounds_length_; ++i) {
+      new (&bounds_[i]) LiveRangeBoundArray();
+    }
+  }
+
+  LiveRangeBoundArray* ArrayFor(int operand_index) {
+    DCHECK(operand_index < bounds_length_);
+    auto range = data_->live_ranges()[operand_index];
+    DCHECK(range != nullptr && !range->IsEmpty());
+    auto array = &bounds_[operand_index];
+    if (array->ShouldInitialize()) {
+      array->Initialize(data_->allocation_zone(), range);
+    }
+    return array;
+  }
+
+ private:
+  const RegisterAllocationData* const data_;
+  const int bounds_length_;
+  LiveRangeBoundArray* const bounds_;
+
+  DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
+};
+
+}  // namespace
+
+
+LiveRangeConnector::LiveRangeConnector(RegisterAllocationData* data)
+    : data_(data) {}
+
+
+bool LiveRangeConnector::CanEagerlyResolveControlFlow(
+    const InstructionBlock* block) const {
+  if (block->PredecessorCount() != 1) return false;
+  return block->predecessors()[0].IsNext(block->rpo_number());
+}
+
+
+void LiveRangeConnector::ResolveControlFlow() {
+  // Lazily linearize live ranges in memory for fast lookup.
+  LiveRangeFinder finder(data());
+  auto& live_in_sets = data()->live_in_sets();
+  for (auto block : code()->instruction_blocks()) {
+    if (CanEagerlyResolveControlFlow(block)) continue;
+    auto live = live_in_sets[block->rpo_number().ToInt()];
+    BitVector::Iterator iterator(live);
+    while (!iterator.Done()) {
+      auto* array = finder.ArrayFor(iterator.Current());
+      for (auto pred : block->predecessors()) {
+        FindResult result;
+        const auto* pred_block = code()->InstructionBlockAt(pred);
+        array->Find(block, pred_block, &result);
+        if (result.cur_cover_ == result.pred_cover_ ||
+            result.cur_cover_->IsSpilled())
+          continue;
+        auto pred_op = result.pred_cover_->GetAssignedOperand();
+        auto cur_op = result.cur_cover_->GetAssignedOperand();
+        if (pred_op == cur_op) continue;
+        ResolveControlFlow(block, cur_op, pred_block, pred_op);
+      }
+      iterator.Advance();
+    }
+  }
+}
+
+
+void LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
+                                            const InstructionOperand& cur_op,
+                                            const InstructionBlock* pred,
+                                            const InstructionOperand& pred_op) {
+  DCHECK(pred_op != cur_op);
+  int gap_index;
+  Instruction::GapPosition position;
+  if (block->PredecessorCount() == 1) {
+    gap_index = block->first_instruction_index();
+    position = Instruction::START;
+  } else {
+    DCHECK(pred->SuccessorCount() == 1);
+    DCHECK(!data()
+                ->InstructionAt(pred->last_instruction_index())
+                ->HasReferenceMap());
+    gap_index = pred->last_instruction_index();
+    position = Instruction::END;
+  }
+  data()->AddGapMove(gap_index, position, pred_op, cur_op);
+}
+
+
+void LiveRangeConnector::ConnectRanges(Zone* temp_zone) {
+  ZoneMap<std::pair<ParallelMove*, InstructionOperand>, InstructionOperand>
+      delayed_insertion_map(temp_zone);
+  for (auto first_range : data()->live_ranges()) {
+    if (first_range == nullptr || first_range->IsChild()) continue;
+    for (auto second_range = first_range->next(); second_range != nullptr;
+         first_range = second_range, second_range = second_range->next()) {
+      auto pos = second_range->Start();
+      // Add gap move if the two live ranges touch and there is no block
+      // boundary.
+      if (second_range->IsSpilled()) continue;
+      if (first_range->End().Value() != pos.Value()) continue;
+      if (data()->IsBlockBoundary(pos) &&
+          !CanEagerlyResolveControlFlow(GetInstructionBlock(pos))) {
+        continue;
+      }
+      auto prev_operand = first_range->GetAssignedOperand();
+      auto cur_operand = second_range->GetAssignedOperand();
+      if (prev_operand == cur_operand) continue;
+      bool delay_insertion = false;
+      Instruction::GapPosition gap_pos;
+      int gap_index = pos.ToInstructionIndex();
+      if (pos.IsGapPosition()) {
+        gap_pos = pos.IsStart() ? Instruction::START : Instruction::END;
+      } else {
+        if (pos.IsStart()) {
+          delay_insertion = true;
+        } else {
+          gap_index++;
+        }
+        gap_pos = delay_insertion ? Instruction::END : Instruction::START;
+      }
+      auto move = code()->InstructionAt(gap_index)->GetOrCreateParallelMove(
+          gap_pos, code_zone());
+      if (!delay_insertion) {
+        move->AddMove(prev_operand, cur_operand);
+      } else {
+        delayed_insertion_map.insert(
+            std::make_pair(std::make_pair(move, prev_operand), cur_operand));
+      }
+    }
+  }
+  if (delayed_insertion_map.empty()) return;
+  // Insert all the moves which should occur after the stored move.
+  ZoneVector<MoveOperands*> to_insert(temp_zone);
+  ZoneVector<MoveOperands*> to_eliminate(temp_zone);
+  to_insert.reserve(4);
+  to_eliminate.reserve(4);
+  auto moves = delayed_insertion_map.begin()->first.first;
+  for (auto it = delayed_insertion_map.begin();; ++it) {
+    bool done = it == delayed_insertion_map.end();
+    if (done || it->first.first != moves) {
+      // Commit the MoveOperands for current ParallelMove.
+      for (auto move : to_eliminate) {
+        move->Eliminate();
+      }
+      for (auto move : to_insert) {
+        moves->push_back(move);
+      }
+      if (done) break;
+      // Reset state.
+      to_eliminate.clear();
+      to_insert.clear();
+      moves = it->first.first;
+    }
+    // Gather all MoveOperands for a single ParallelMove.
+    auto move = new (code_zone()) MoveOperands(it->first.second, it->second);
+    auto eliminate = moves->PrepareInsertAfter(move);
+    to_insert.push_back(move);
+    if (eliminate != nullptr) to_eliminate.push_back(eliminate);
   }
-  range->set_assigned_register(reg);
-  auto assignment = range->GetAssignedOperand();
-  // TODO(dcarney): stop aliasing hint operands.
-  range->ConvertUsesToOperand(assignment, nullptr);
-  if (range->is_phi()) AssignPhiInput(range, assignment);
 }
 
 }  // namespace compiler
diff --git a/src/compiler/register-allocator.h b/src/compiler/register-allocator.h
index b719f32..d968258 100644
--- a/src/compiler/register-allocator.h
+++ b/src/compiler/register-allocator.h
@@ -340,17 +340,18 @@ class LiveRange final : public ZoneObject {
   // Shorten the most recently added interval by setting a new start.
   void ShortenTo(LifetimePosition start);
 
-#ifdef DEBUG
   // True if target overlaps an existing interval.
  bool HasOverlap(UseInterval* target) const;
 
   void Verify() const;
-#endif
+
+  void ConvertUsesToOperand(const InstructionOperand& op,
+                            InstructionOperand* spill_op);
+
+  void set_kind(RegisterKind kind) { kind_ = kind; }
 
  private:
   struct SpillAtDefinitionList;
 
-  void ConvertUsesToOperand(const InstructionOperand& op,
-                            InstructionOperand* spill_op);
   UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const;
   void AdvanceLastProcessedMarker(UseInterval* to_start_of,
                                   LifetimePosition but_not_past) const;
@@ -381,8 +382,6 @@
   };
   SpillAtDefinitionList* spills_at_definition_;
 
-  friend class RegisterAllocator;  // Assigns to kind_.
-
   DISALLOW_COPY_AND_ASSIGN(LiveRange);
 };
 
@@ -412,81 +411,143 @@ class SpillRange final : public ZoneObject {
 };
 
 
-class RegisterAllocator final : public ZoneObject {
+class RegisterAllocationData final : public ZoneObject {
  public:
-  explicit RegisterAllocator(const RegisterConfiguration* config,
-                             Zone* local_zone, Frame* frame,
-                             InstructionSequence* code,
-                             const char* debug_name = nullptr);
+  class PhiMapValue : public ZoneObject {
+   public:
+    PhiMapValue(PhiInstruction* phi, const InstructionBlock* block, Zone* zone)
+        : phi(phi), block(block), incoming_moves(zone) {
+      incoming_moves.reserve(phi->operands().size());
+    }
+    PhiInstruction* const phi;
+    const InstructionBlock* const block;
+    ZoneVector<MoveOperands*> incoming_moves;
+  };
+  typedef ZoneMap<int, PhiMapValue*> PhiMap;
+
+  RegisterAllocationData(const RegisterConfiguration* config,
+                         Zone* allocation_zone, Frame* frame,
+                         InstructionSequence* code,
+                         const char* debug_name = nullptr);
 
   const ZoneVector<LiveRange*>& live_ranges() const { return live_ranges_; }
+  ZoneVector<LiveRange*>& live_ranges() { return live_ranges_; }
   const ZoneVector<LiveRange*>& fixed_live_ranges() const {
     return fixed_live_ranges_;
   }
+  ZoneVector<LiveRange*>& fixed_live_ranges() { return fixed_live_ranges_; }
+  ZoneVector<LiveRange*>& fixed_double_live_ranges() {
+    return fixed_double_live_ranges_;
+  }
   const ZoneVector<LiveRange*>& fixed_double_live_ranges() const {
     return fixed_double_live_ranges_;
   }
+  const ZoneVector<BitVector*>& live_in_sets() const { return live_in_sets_; }
+  ZoneVector<BitVector*>& live_in_sets() { return live_in_sets_; }
+  ZoneVector<SpillRange*>& spill_ranges() { return spill_ranges_; }
+  const ZoneVector<SpillRange*>& spill_ranges() const { return spill_ranges_; }
   InstructionSequence* code() const { return code_; }
-  // This zone is for datastructures only needed during register allocation.
-  Zone* local_zone() const { return local_zone_; }
-
-  // Phase 1 : insert moves to account for fixed register operands.
-  void MeetRegisterConstraints();
+  // This zone is for data structures only needed during register allocation
+  // phases.
+  Zone* allocation_zone() const { return allocation_zone_; }
+  // This zone is for InstructionOperands and moves that live beyond register
+  // allocation.
+  Zone* code_zone() const { return code()->zone(); }
+  const PhiMap& phi_map() const { return phi_map_; }
+  PhiMap& phi_map() { return phi_map_; }
+  Frame* frame() const { return frame_; }
+  const char* debug_name() const { return debug_name_; }
+  const RegisterConfiguration* config() const { return config_; }
 
-  // Phase 2: deconstruct SSA by inserting moves in successors and the headers
-  // of blocks containing phis.
-  void ResolvePhis();
+  void SetLiveRangeAssignedRegister(LiveRange* range, int reg);
+  LiveRange* LiveRangeFor(int index);
+  Instruction* InstructionAt(int index) { return code()->InstructionAt(index); }
 
-  // Phase 3: compute liveness of all virtual register.
-  void BuildLiveRanges();
-  bool ExistsUseWithoutDefinition();
+  void AssignPhiInput(LiveRange* range, const InstructionOperand& assignment);
+  SpillRange* AssignSpillRangeToLiveRange(LiveRange* range);
 
-  // Phase 4: compute register assignments.
-  void AllocateGeneralRegisters();
-  void AllocateDoubleRegisters();
+  MoveOperands* AddGapMove(int index, Instruction::GapPosition position,
+                           const InstructionOperand& from,
+                           const InstructionOperand& to);
 
-  // Phase 5: assign spill splots.
-  void AssignSpillSlots();
+  bool IsBlockBoundary(LifetimePosition pos) {
+    return pos.IsFullStart() &&
+           code()
+                   ->GetInstructionBlock(pos.ToInstructionIndex())
+                   ->code_start() == pos.ToInstructionIndex();
+  }
 
-  // Phase 6: commit assignment.
-  void CommitAssignment();
+  const InstructionBlock* GetInstructionBlock(LifetimePosition pos) const {
+    return code()->GetInstructionBlock(pos.ToInstructionIndex());
+  }
 
-  // Phase 7: compute values for pointer maps.
-  void PopulateReferenceMaps();
+  bool IsReference(int virtual_register) const {
+    return code()->IsReference(virtual_register);
+  }
 
-  // Phase 8: reconnect split ranges with moves.
-  void ConnectRanges();
+  bool ExistsUseWithoutDefinition();
 
-  // Phase 9: insert moves to connect ranges across basic blocks.
-  void ResolveControlFlow();
+  // Creates a new live range.
+  LiveRange* NewLiveRange(int index);
 
  private:
-  int GetVirtualRegister() { return code()->NextVirtualRegister(); }
+  Zone* const allocation_zone_;
+  Frame* const frame_;
+  InstructionSequence* const code_;
+  const char* const debug_name_;
+  const RegisterConfiguration* const config_;
+  PhiMap phi_map_;
+  ZoneVector<BitVector*> live_in_sets_;
+  ZoneVector<LiveRange*> live_ranges_;
+  ZoneVector<LiveRange*> fixed_live_ranges_;
+  ZoneVector<LiveRange*> fixed_double_live_ranges_;
+  ZoneVector<SpillRange*> spill_ranges_;
+  BitVector* assigned_registers_;
+  BitVector* assigned_double_registers_;
 
-  // Checks whether the value of a given virtual register is a reference.
-  // TODO(titzer): rename this to IsReference.
-  bool HasTaggedValue(int virtual_register) const;
+  DISALLOW_COPY_AND_ASSIGN(RegisterAllocationData);
+};
 
-  // Returns the register kind required by the given virtual register.
-  RegisterKind RequiredRegisterKind(int virtual_register) const;
 
-  // Creates a new live range.
-  LiveRange* NewLiveRange(int index);
+class LiveRangeBuilder final : public ZoneObject {
+ public:
+  explicit LiveRangeBuilder(RegisterAllocationData* data);
 
-  // This zone is for InstructionOperands and moves that live beyond register
-  // allocation.
+  // Phase 1: insert moves to account for fixed register operands.
+  void MeetRegisterConstraints();
+
+  // Phase 2: deconstruct SSA by inserting moves in successors and the headers
+  // of blocks containing phis.
+  void ResolvePhis();
+
+  // Phase 3: compute liveness of all virtual registers.
+  void BuildLiveRanges();
+
+ private:
+  RegisterAllocationData* data() const { return data_; }
+  InstructionSequence* code() const { return data()->code(); }
+  Zone* allocation_zone() const { return data()->allocation_zone(); }
   Zone* code_zone() const { return code()->zone(); }
+  const RegisterConfiguration* config() const { return data()->config(); }
+  ZoneVector<BitVector*>& live_in_sets() const {
+    return data()->live_in_sets();
+  }
+  ZoneVector<LiveRange*>& live_ranges() { return data()->live_ranges(); }
+  ZoneVector<LiveRange*>& fixed_live_ranges() {
+    return data()->fixed_live_ranges();
+  }
+  ZoneVector<LiveRange*>& fixed_double_live_ranges() {
+    return data()->fixed_double_live_ranges();
+  }
+  ZoneVector<SpillRange*>& spill_ranges() { return data()->spill_ranges(); }
+  RegisterAllocationData::PhiMap& phi_map() { return data()->phi_map(); }
 
-  BitVector* assigned_registers() { return assigned_registers_; }
-  BitVector* assigned_double_registers() { return assigned_double_registers_; }
+  Instruction* InstructionAt(int index) { return code()->InstructionAt(index); }
+  bool IsReference(int virtual_register) const {
+    return data()->IsReference(virtual_register);
+  }
 
-#ifdef DEBUG
   void Verify() const;
-#endif
-
-  void AllocateRegisters();
-  bool CanEagerlyResolveControlFlow(const InstructionBlock* block) const;
-  bool SafePointsAreInOrder() const;
 
   // Liveness analysis support.
   BitVector* ComputeLiveOut(const InstructionBlock* block);
@@ -501,6 +562,16 @@
                         const InstructionBlock* block);
   void ResolvePhis(const InstructionBlock* block);
 
+  LiveRange* LiveRangeFor(int index) { return data()->LiveRangeFor(index); }
+  static int FixedLiveRangeID(int index) { return -index - 1; }
+  int FixedDoubleLiveRangeID(int index);
+  LiveRange* FixedLiveRangeFor(int index);
+  LiveRange* FixedDoubleLiveRangeFor(int index);
+  Instruction* GetLastInstruction(const InstructionBlock* block);
+
+  // Returns the register kind required by the given virtual register.
+  RegisterKind RequiredRegisterKind(int virtual_register) const;
+
   // Helper methods for building intervals.
   InstructionOperand* AllocateFixed(UnallocatedOperand* operand, int pos,
                                     bool is_tagged);
@@ -509,9 +580,36 @@
                InstructionOperand* hint);
   void Use(LifetimePosition block_start, LifetimePosition position,
            InstructionOperand* operand, InstructionOperand* hint);
-  MoveOperands* AddGapMove(int index, Instruction::GapPosition position,
-                           const InstructionOperand& from,
-                           const InstructionOperand& to);
+
+  RegisterAllocationData* const data_;
+
+  DISALLOW_COPY_AND_ASSIGN(LiveRangeBuilder);
+};
+
+
+class LinearScanAllocator final : public ZoneObject {
+ public:
+  explicit LinearScanAllocator(RegisterAllocationData* data, RegisterKind kind);
+
+  // Phase 4: compute register assignments.
+  void AllocateRegisters();
+
+ private:
+  RegisterAllocationData* data() const { return data_; }
+  InstructionSequence* code() const { return data()->code(); }
+  Zone* allocation_zone() const { return data()->allocation_zone(); }
+  Zone* code_zone() const { return code()->zone(); }
+  const RegisterConfiguration* config() const { return data()->config(); }
+
+  Instruction* InstructionAt(int index) { return code()->InstructionAt(index); }
+
+  int GetVirtualRegister() { return code()->NextVirtualRegister(); }
+
+  bool IsReference(int virtual_register) const {
+    return data()->IsReference(virtual_register);
+  }
+
+  LiveRange* LiveRangeFor(int index) { return data()->LiveRangeFor(index); }
 
   // Helper methods for updating the live range lists.
   void AddToActive(LiveRange* range);
@@ -529,7 +627,6 @@
   bool TryReuseSpillForPhi(LiveRange* range);
   bool TryAllocateFreeReg(LiveRange* range);
   void AllocateBlockedReg(LiveRange* range);
-  SpillRange* AssignSpillRangeToLiveRange(LiveRange* range);
 
   // Live range splitting helpers.
@@ -571,40 +668,26 @@
                                      LifetimePosition pos);
   void Spill(LiveRange* range);
 
-  bool IsBlockBoundary(LifetimePosition pos);
-
-  // Helper methods for resolving control flow.
-  void ResolveControlFlow(const InstructionBlock* block,
-                          const InstructionOperand& cur_op,
-                          const InstructionBlock* pred,
-                          const InstructionOperand& pred_op);
-
-  void SetLiveRangeAssignedRegister(LiveRange* range, int reg);
 
   // Return the block which contains the given lifetime position.
-  const InstructionBlock* GetInstructionBlock(LifetimePosition pos);
+  const InstructionBlock* GetInstructionBlock(LifetimePosition pos) const {
+    return data()->GetInstructionBlock(pos);
+  }
 
-  // Helper methods for the fixed registers.
-  int RegisterCount() const;
-  static int FixedLiveRangeID(int index) { return -index - 1; }
-  int FixedDoubleLiveRangeID(int index);
-  LiveRange* FixedLiveRangeFor(int index);
-  LiveRange* FixedDoubleLiveRangeFor(int index);
-  LiveRange* LiveRangeFor(int index);
-  Instruction* GetLastInstruction(const InstructionBlock* block);
+  void SetLiveRangeAssignedRegister(LiveRange* range, int reg) {
+    data()->SetLiveRangeAssignedRegister(range, reg);
+  }
 
+  // Helper methods for the fixed registers.
+  int RegisterCount() const { return num_registers_; }
 
   const char* RegisterName(int allocation_index);
 
-  Instruction* InstructionAt(int index) { return code()->InstructionAt(index); }
-
-  void AssignPhiInput(LiveRange* range, const InstructionOperand& assignment);
-
-  Frame* frame() const { return frame_; }
-  const char* debug_name() const { return debug_name_; }
-  const RegisterConfiguration* config() const { return config_; }
-  ZoneVector<LiveRange*>& live_ranges() { return live_ranges_; }
-  ZoneVector<LiveRange*>& fixed_live_ranges() { return fixed_live_ranges_; }
+  ZoneVector<LiveRange*>& live_ranges() { return data()->live_ranges(); }
+  ZoneVector<LiveRange*>& fixed_live_ranges() {
+    return data()->fixed_live_ranges();
+  }
   ZoneVector<LiveRange*>& fixed_double_live_ranges() {
-    return fixed_double_live_ranges_;
+    return data()->fixed_double_live_ranges();
   }
   ZoneVector<LiveRange*>& unhandled_live_ranges() {
     return unhandled_live_ranges_;
@@ -613,54 +696,92 @@
   ZoneVector<LiveRange*>& inactive_live_ranges() {
     return inactive_live_ranges_;
   }
-  ZoneVector<SpillRange*>& spill_ranges() { return spill_ranges_; }
+  ZoneVector<SpillRange*>& spill_ranges() { return data()->spill_ranges(); }
+  RegisterAllocationData::PhiMap& phi_map() { return data()->phi_map(); }
 
-  class PhiMapValue : public ZoneObject {
-   public:
-    PhiMapValue(PhiInstruction* phi, const InstructionBlock* block, Zone* zone)
-        : phi(phi), block(block), incoming_moves(zone) {
-      incoming_moves.reserve(phi->operands().size());
-    }
-    PhiInstruction* const phi;
-    const InstructionBlock* const block;
-    ZoneVector<MoveOperands*> incoming_moves;
-  };
-  typedef ZoneMap<int, PhiMapValue*> PhiMap;
+  RegisterAllocationData* const data_;
+  const RegisterKind mode_;
+  const int num_registers_;
 
-  Zone* const local_zone_;
-  Frame* const frame_;
-  InstructionSequence* const code_;
-  const char* const debug_name_;
-
-  const RegisterConfiguration* config_;
-  PhiMap phi_map_;
-
-  // During liveness analysis keep a mapping from block id to live_in sets
-  // for blocks already analyzed.
-  ZoneVector<BitVector*> live_in_sets_;
-
-  // Liveness analysis results.
-  ZoneVector<LiveRange*> live_ranges_;
-
-  // Lists of live ranges
-  ZoneVector<LiveRange*> fixed_live_ranges_;
-  ZoneVector<LiveRange*> fixed_double_live_ranges_;
   ZoneVector<LiveRange*> unhandled_live_ranges_;
   ZoneVector<LiveRange*> active_live_ranges_;
   ZoneVector<LiveRange*> inactive_live_ranges_;
-  ZoneVector<SpillRange*> spill_ranges_;
-
-  RegisterKind mode_;
-  int num_registers_;
-
-  BitVector* assigned_registers_;
-  BitVector* assigned_double_registers_;
 
 #ifdef DEBUG
   LifetimePosition allocation_finger_;
 #endif
 
-  DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
+  DISALLOW_COPY_AND_ASSIGN(LinearScanAllocator);
 };
 
 
+class OperandAssigner final : public ZoneObject {
+ public:
+  explicit OperandAssigner(RegisterAllocationData* data);
+
+  // Phase 5: assign spill slots.
+  void AssignSpillSlots();
+
+  // Phase 6: commit assignment.
+  void CommitAssignment();
+
+ private:
+  RegisterAllocationData* data() const { return data_; }
+
+  RegisterAllocationData* const data_;
+
+  DISALLOW_COPY_AND_ASSIGN(OperandAssigner);
+};
+
+
+class ReferenceMapPopulator final : public ZoneObject {
+ public:
+  explicit ReferenceMapPopulator(RegisterAllocationData* data);
+
+  // Phase 7: compute values for pointer maps.
+ void PopulateReferenceMaps(); + + private: + bool SafePointsAreInOrder() const; + + bool IsReference(int virtual_register) const { + return data()->IsReference(virtual_register); + } + + RegisterAllocationData* data() const { return data_; } + + RegisterAllocationData* const data_; + + DISALLOW_COPY_AND_ASSIGN(ReferenceMapPopulator); +}; + + +class LiveRangeConnector final : public ZoneObject { + public: + explicit LiveRangeConnector(RegisterAllocationData* data); + + // Phase 8: reconnect split ranges with moves. + void ConnectRanges(Zone* temp_zone); + + // Phase 9: insert moves to connect ranges across basic blocks. + void ResolveControlFlow(); + + private: + const InstructionBlock* GetInstructionBlock(LifetimePosition pos) const { + return data()->GetInstructionBlock(pos); + } + bool CanEagerlyResolveControlFlow(const InstructionBlock* block) const; + void ResolveControlFlow(const InstructionBlock* block, + const InstructionOperand& cur_op, + const InstructionBlock* pred, + const InstructionOperand& pred_op); + InstructionSequence* code() const { return data()->code(); } + Zone* code_zone() const { return code()->zone(); } + RegisterAllocationData* data() const { return data_; } + + RegisterAllocationData* const data_; + + DISALLOW_COPY_AND_ASSIGN(LiveRangeConnector); }; } // namespace compiler -- 2.7.4
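For orientation, the split-out phase objects above are intended to run in the numbered order against a shared RegisterAllocationData; the actual wiring lives in the pipeline.cc hunks counted in the diffstat but not reproduced in this patch excerpt. The driver below is an illustrative sketch of that ordering only: the AllocateRegistersSketch name and the stack-allocated phase objects are hypothetical, and the real pipeline code may differ in details such as verification and tracing.

    // Illustrative phase ordering implied by the numbered comments in
    // register-allocator.h; hypothetical driver, not committed code.
    void AllocateRegistersSketch(const RegisterConfiguration* config,
                                 Frame* frame, InstructionSequence* code,
                                 Zone* zone) {
      RegisterAllocationData data(config, zone, frame, code);
      LiveRangeBuilder builder(&data);
      builder.MeetRegisterConstraints();  // Phase 1
      builder.ResolvePhis();              // Phase 2
      builder.BuildLiveRanges();          // Phase 3
      LinearScanAllocator general(&data, GENERAL_REGISTERS);
      general.AllocateRegisters();        // Phase 4, general registers
      LinearScanAllocator doubles(&data, DOUBLE_REGISTERS);
      doubles.AllocateRegisters();        // Phase 4, double registers
      OperandAssigner assigner(&data);
      assigner.AssignSpillSlots();        // Phase 5
      assigner.CommitAssignment();        // Phase 6
      ReferenceMapPopulator populator(&data);
      populator.PopulateReferenceMaps();  // Phase 7
      LiveRangeConnector connector(&data);
      connector.ConnectRanges(zone);      // Phase 8
      connector.ResolveControlFlow();     // Phase 9
    }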