      code_(code),
      debug_name_(debug_name),
      config_(config),
+      phi_map_(PhiMap::key_compare(), PhiMap::allocator_type(local_zone())),
      live_in_sets_(code->InstructionBlockCount(), local_zone()),
      live_ranges_(code->VirtualRegisterCount() * 2, local_zone()),
      fixed_live_ranges_(this->config()->num_general_registers(), NULL,
}
+bool RegisterAllocator::TryReuseSpillForPhi(LiveRange* range) {
+  DCHECK(FLAG_turbo_reuse_spill_slots);
+  DCHECK(!range->HasAllocatedSpillOperand());
+  if (range->IsChild() || !range->is_phi()) return false;
+
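+  // phi_map_ is populated for every phi in ResolvePhis, so the lookup below
+  // cannot fail.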
+  auto lookup = phi_map_.find(range->id());
+  DCHECK(lookup != phi_map_.end());
+  auto phi = lookup->second.phi;
+  auto block = lookup->second.block;
+  // Count the number of spilled operands.
+  size_t spilled_count = 0;
+  LiveRange* first_op = nullptr;
+  for (size_t i = 0; i < phi->operands().size(); i++) {
+    int op = phi->operands()[i];
+    LiveRange* op_range = LiveRangeFor(op);
+    if (op_range->GetSpillRange() == nullptr) continue;
+    auto pred = code()->InstructionBlockAt(block->predecessors()[i]);
+    LifetimePosition pred_end =
+        LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
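+    // Walk the chain of split children to find the part of the range that
+    // covers the end of the predecessor block.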
+    while (op_range != nullptr && !op_range->CanCover(pred_end)) {
+      op_range = op_range->next();
+    }
+    if (op_range != nullptr && op_range->IsSpilled()) {
+      spilled_count++;
+      if (first_op == nullptr) {
+        first_op = op_range->TopLevel();
+      }
+    }
+  }
+
+  // Only continue if more than half of the operands are spilled.
+  if (spilled_count * 2 <= phi->operands().size()) {
+    return false;
+  }
+
+  // Try to merge the spilled operands and count the number of merged spilled
+  // operands.
+  DCHECK(first_op != nullptr);
+  SpillRange* first_op_spill = first_op->GetSpillRange();
+  size_t num_merged = 1;
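+  // TryMerge only succeeds if the use intervals of the two spill ranges do
+  // not intersect, i.e. the operands can share a single spill slot.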
+  for (size_t i = 1; i < phi->operands().size(); i++) {
+    int op = phi->operands()[i];
+    LiveRange* op_range = LiveRangeFor(op);
+    SpillRange* op_spill = op_range->GetSpillRange();
+    if (op_spill != nullptr) {
+      if (op_spill->id() == first_op_spill->id() ||
+          first_op_spill->TryMerge(op_spill, local_zone())) {
+        num_merged++;
+      }
+    }
+  }
+
+  // Only continue if enough operands could be merged to the same spill slot.
+  if (num_merged * 2 <= phi->operands().size() ||
+      AreUseIntervalsIntersecting(first_op_spill->interval(),
+                                  range->first_interval())) {
+    return false;
+  }
+
+  // If the range does not need a register soon, spill it to the merged
+  // spill range.
+  LifetimePosition next_pos = range->Start();
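+  // If the range starts at a gap position, begin looking for a register use
+  // at the following instruction.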
+  if (code()->IsGapAt(next_pos.InstructionIndex())) {
+    next_pos = next_pos.NextInstruction();
+  }
+  UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
+  if (pos == nullptr) {
+    SpillRange* spill_range = AssignSpillRangeToLiveRange(range->TopLevel());
+    CHECK(first_op_spill->TryMerge(spill_range, local_zone()));
+    Spill(range);
+    return true;
+  } else if (pos->pos().Value() > range->Start().NextInstruction().Value()) {
+    SpillRange* spill_range = AssignSpillRangeToLiveRange(range->TopLevel());
+    CHECK(first_op_spill->TryMerge(spill_range, local_zone()));
+    SpillBetween(range, range->Start(), pos->pos());
+    if (!AllocationOk()) return false;
+    DCHECK(UnhandledIsSorted());
+    return true;
+  }
+  return false;
+}
+
+
void RegisterAllocator::MeetRegisterConstraints(const InstructionBlock* block) {
  int start = block->first_instruction_index();
  int end = block->last_instruction_index();
void RegisterAllocator::ResolvePhis(const InstructionBlock* block) {
  for (auto phi : block->phis()) {
+    if (FLAG_turbo_reuse_spill_slots) {
+      auto res = phi_map_.insert(
+          std::make_pair(phi->virtual_register(), PhiMapValue(phi, block)));
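+      // A virtual register is defined by at most one phi, so the insertion
+      // must succeed.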
+      DCHECK(res.second);
+      USE(res);
+    }
    auto output = phi->output();
    int phi_vreg = phi->virtual_register();
    if (!FLAG_turbo_delay_ssa_decon) {
    }
  }
+    if (FLAG_turbo_reuse_spill_slots) {
+      if (TryReuseSpillForPhi(current)) {
+        continue;
+      }
+      if (!AllocationOk()) return;
+    }
+
    for (int i = 0; i < active_live_ranges_.length(); ++i) {
      LiveRange* cur_active = active_live_ranges_.at(i);
      if (cur_active->End().Value() <= position.Value()) {
  void InactiveToActive(LiveRange* range);

  // Helper methods for allocating registers.
+  bool TryReuseSpillForPhi(LiveRange* range);
  bool TryAllocateFreeReg(LiveRange* range);
  void AllocateBlockedReg(LiveRange* range);
  SpillRange* AssignSpillRangeToLiveRange(LiveRange* range);
  const char* debug_name() const { return debug_name_; }
  const RegisterConfiguration* config() const { return config_; }
+  struct PhiMapValue {
+    PhiMapValue(PhiInstruction* phi, const InstructionBlock* block)
+        : phi(phi), block(block) {}
+    PhiInstruction* const phi;
+    const InstructionBlock* const block;
+  };
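+  // Maps a phi's virtual register to its defining instruction and block;
+  // the map's entries live in the allocator's local zone.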
+  typedef std::map<int, PhiMapValue, std::less<int>,
+                   zone_allocator<std::pair<const int, PhiMapValue>>> PhiMap;
+
  Zone* const local_zone_;
  Frame* const frame_;
  InstructionSequence* const code_;
  const RegisterConfiguration* config_;
+  PhiMap phi_map_;
+
  // During liveness analysis keep a mapping from block id to live_in sets
  // for blocks already analyzed.
  ZoneVector<BitVector*> live_in_sets_;