--- /dev/null
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/coalesced-live-ranges.h"
+#include "src/compiler/greedy-allocator.h"
+#include "src/compiler/register-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
+ } while (false)
+
+
+const float CoalescedLiveRanges::kAllocatedRangeMultiplier = 10.0;
+
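+// Marks the range as allocated: boosts its weight and records each of its use
+// intervals in storage.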
+void CoalescedLiveRanges::AllocateRange(LiveRange* range) {
+ UpdateWeightAtAllocation(range);
+ for (auto interval = range->first_interval(); interval != nullptr;
+ interval = interval->next()) {
+ storage().insert({interval->start(), interval->end(), range});
+ }
+}
+
+
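+// Removes all of the range's use intervals from storage and clears its
+// register assignment.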
+void CoalescedLiveRanges::Remove(LiveRange* range) {
+ for (auto interval = range->first_interval(); interval != nullptr;
+ interval = interval->next()) {
+ storage().erase({interval->start(), interval->end(), nullptr});
+ }
+ range->UnsetAssignedRegister();
+}
+
+
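+// Returns the largest weight among the allocated ranges conflicting with the
+// given range, or LiveRange::kInvalidWeight if there is no conflict.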
+float CoalescedLiveRanges::GetMaximumConflictingWeight(
+ const LiveRange* range) const {
+ float ret = LiveRange::kInvalidWeight;
+ auto end = storage().end();
+ for (auto query = range->first_interval(); query != nullptr;
+ query = query->next()) {
+ auto conflict = GetFirstConflict(query);
+
+ if (conflict == end) continue;
+ for (; QueryIntersectsAllocatedInterval(query, conflict); ++conflict) {
+      // We may visit the same conflicting range more than once, either because
+      // several (not necessarily consecutive) allocated intervals belong to
+      // that range, or because different intervals of the query range conflict
+      // with the same allocated range.
+ DCHECK_NE(conflict->range->weight(), LiveRange::kInvalidWeight);
+ ret = Max(ret, conflict->range->weight());
+ if (ret == LiveRange::kMaxWeight) break;
+ }
+ }
+ return ret;
+}
+
+
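+// Evicts every allocated range conflicting with the given range and hands it
+// back to the scheduler for another allocation attempt. Conflicting ranges
+// must not be fixed.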
+void CoalescedLiveRanges::EvictAndRescheduleConflicts(
+ LiveRange* range, AllocationScheduler* scheduler) {
+ auto end = storage().end();
+
+ for (auto query = range->first_interval(); query != nullptr;
+ query = query->next()) {
+ auto conflict = GetFirstConflict(query);
+ if (conflict == end) continue;
+ while (QueryIntersectsAllocatedInterval(query, conflict)) {
+ LiveRange* range_to_evict = conflict->range;
+ // Bypass successive intervals belonging to the same range, because we're
+ // about to remove this range, and we don't want the storage iterator to
+ // become invalid.
+ while (conflict != end && conflict->range == range_to_evict) {
+ ++conflict;
+ }
+
+ DCHECK(range_to_evict->HasRegisterAssigned());
+ CHECK(!range_to_evict->IsFixed());
+ Remove(range_to_evict);
+ UpdateWeightAtEviction(range_to_evict);
+ TRACE("Evicted range %d.\n", range_to_evict->id());
+ scheduler->Schedule(range_to_evict);
+ }
+ }
+}
+
+
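+// Debug helper: verifies that the stored intervals are sorted and do not
+// overlap.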
+bool CoalescedLiveRanges::VerifyAllocationsAreValid() const {
+ LifetimePosition last_end = LifetimePosition::GapFromInstructionIndex(0);
+ for (auto i : storage_) {
+ if (i.start < last_end) {
+ return false;
+ }
+ last_end = i.end;
+ }
+ return true;
+}
+
+
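+// The weight of a range is boosted while it holds a register and restored on
+// eviction, making allocated ranges harder to displace than unallocated ones
+// of equal weight.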
+void CoalescedLiveRanges::UpdateWeightAtAllocation(LiveRange* range) {
+ DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
+ range->set_weight(range->weight() * kAllocatedRangeMultiplier);
+}
+
+
+void CoalescedLiveRanges::UpdateWeightAtEviction(LiveRange* range) {
+ DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
+ range->set_weight(range->weight() / kAllocatedRangeMultiplier);
+}
+
+
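+// Returns an iterator to the first allocated interval intersecting the query
+// interval, or storage().end() if there is no intersection.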
+CoalescedLiveRanges::interval_iterator CoalescedLiveRanges::GetFirstConflict(
+ const UseInterval* query) const {
+ DCHECK(query != nullptr);
+ auto end = storage().end();
+ LifetimePosition q_start = query->start();
+ LifetimePosition q_end = query->end();
+
+ if (storage().empty() || storage().rbegin()->end <= q_start ||
+ storage().begin()->start >= q_end) {
+ return end;
+ }
+
+ auto ret = storage().upper_bound(AsAllocatedInterval(q_start));
+  // ret is either at the end (no allocated interval starts strictly after
+  // q_start) or at the first allocated interval whose start is strictly
+  // greater than q_start. In either case, the allocated interval right before
+  // ret may still intersect the query: either it starts before the query's
+  // start but ends after it, or it starts exactly at the query's start. So
+  // unless we're right at the beginning of the storage - meaning the first
+  // allocated interval also starts after the query's start - look at the
+  // interval behind.
+ if (ret != storage().begin()) {
+ --ret;
+ if (!QueryIntersectsAllocatedInterval(query, ret)) {
+ // The interval behind wasn't intersecting, so move back.
+ ++ret;
+ }
+ }
+ if (ret != end && QueryIntersectsAllocatedInterval(query, ret)) return ret;
+ return end;
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
} while (false)
-class CoalescedLiveRanges : public ZoneObject {
- public:
- explicit CoalescedLiveRanges(Zone* zone) : storage_(zone) {}
+namespace {
- LiveRange* Find(UseInterval* query) {
- ZoneSplayTree<Config>::Locator locator;
- if (storage_.Find(GetKey(query), &locator)) {
- return locator.value();
- }
- return nullptr;
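+// Pushes the register assigned to the range into its use hints and, for phi
+// ranges, into the phi map.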
+void UpdateOperands(LiveRange* range, RegisterAllocationData* data) {
+ int reg_id = range->assigned_register();
+ range->SetUseHints(reg_id);
+ if (range->is_phi()) {
+ data->GetPhiMapValueFor(range->id())->set_assigned_register(reg_id);
}
+}
- // TODO(mtrofin): Change to void returning if we do not care if the interval
- // was previously added.
- bool Insert(LiveRange* range) {
- auto* interval = range->first_interval();
- while (interval != nullptr) {
- if (!Insert(interval, range)) return false;
- interval = interval->next();
- }
- return true;
- }
- bool Remove(LiveRange* range) {
- bool ret = false;
- auto* segment = range->first_interval();
- while (segment != nullptr) {
- ret |= Remove(segment);
- segment = segment->next();
- }
- return ret;
- }
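+// Splits the range at the given position; the original range keeps the head
+// and the returned child range gets the tail.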
+LiveRange* Split(LiveRange* range, RegisterAllocationData* data,
+ LifetimePosition pos) {
+ DCHECK(range->Start() < pos && pos < range->End());
+ DCHECK(pos.IsStart() || pos.IsGapPosition() ||
+ (data->code()
+ ->GetInstructionBlock(pos.ToInstructionIndex())
+ ->last_instruction_index() != pos.ToInstructionIndex()));
+ LiveRange* result = data->NewChildRangeFor(range);
+ range->SplitAt(pos, result, data->allocation_zone());
+ return result;
+}
- bool IsEmpty() { return storage_.is_empty(); }
-
- private:
- struct Config {
- typedef std::pair<int, int> Key;
- typedef LiveRange* Value;
- static const Key kNoKey;
- static Value NoValue() { return nullptr; }
- static int Compare(const Key& a, const Key& b) {
- if (a.second <= b.first) return -1;
- if (a.first >= b.second) return 1;
- return 0;
- }
- };
- Config::Key GetKey(UseInterval* interval) {
- if (interval == nullptr) return std::make_pair(0, 0);
- return std::make_pair(interval->start().value(), interval->end().value());
- }
+// TODO(mtrofin): explain why splitting in gap START is always OK.
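+// Returns the gap position at instruction_index if it falls strictly inside
+// the range, otherwise an invalid position.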
+LifetimePosition GetSplitPositionForInstruction(const LiveRange* range,
+ const InstructionSequence* code,
+ int instruction_index) {
+ LifetimePosition ret = LifetimePosition::Invalid();
- // TODO(mtrofin): Change to void returning if we do not care if the interval
- // was previously added.
- bool Insert(UseInterval* interval, LiveRange* range) {
- ZoneSplayTree<Config>::Locator locator;
- bool ret = storage_.Insert(GetKey(interval), &locator);
- if (ret) locator.set_value(range);
- return ret;
+ ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
+ if (range->Start() >= ret || ret >= range->End()) {
+ return LifetimePosition::Invalid();
}
+ return ret;
+}
- bool Remove(UseInterval* key) { return storage_.Remove(GetKey(key)); }
- ZoneSplayTree<Config> storage_;
- DISALLOW_COPY_AND_ASSIGN(CoalescedLiveRanges);
-};
+int GetFirstGapIndex(const UseInterval* interval) {
+ LifetimePosition start = interval->start();
+ int ret = start.ToInstructionIndex();
+ return ret;
+}
-const std::pair<int, int> CoalescedLiveRanges::Config::kNoKey = {0, 0};
+int GetLastGapIndex(const UseInterval* interval) {
+ LifetimePosition end = interval->end();
+ return end.ToInstructionIndex();
+}
-GreedyAllocator::GreedyAllocator(RegisterAllocationData* data,
- RegisterKind kind, Zone* local_zone)
- : RegisterAllocator(data, kind),
- local_zone_(local_zone),
- allocations_(local_zone),
- queue_(local_zone) {}
+// Basic heuristic for advancing the algorithm, used when no other splitting
+// heuristic has succeeded.
+LifetimePosition GetLastResortSplitPosition(const LiveRange* range,
+ const InstructionSequence* code) {
+ if (range->first_interval()->next() != nullptr) {
+ return range->first_interval()->next()->start();
+ }
-unsigned GreedyAllocator::GetLiveRangeSize(LiveRange* range) {
- auto interval = range->first_interval();
- if (interval == nullptr) return 0;
+ UseInterval* interval = range->first_interval();
+ int first = GetFirstGapIndex(interval);
+ int last = GetLastGapIndex(interval);
+ if (first == last) return LifetimePosition::Invalid();
- unsigned size = 0;
- while (interval != nullptr) {
- size += (interval->end().value() - interval->start().value());
- interval = interval->next();
+  // TODO(mtrofin): determine why we can't just split at an arbitrary position
+  // within the range, e.g. its middle.
+ for (UsePosition* pos = range->first_pos(); pos != nullptr;
+ pos = pos->next()) {
+ if (pos->type() != UsePositionType::kRequiresRegister) continue;
+ LifetimePosition before = GetSplitPositionForInstruction(
+ range, code, pos->pos().ToInstructionIndex());
+ if (before.IsValid()) return before;
+ LifetimePosition after = GetSplitPositionForInstruction(
+ range, code, pos->pos().ToInstructionIndex() + 1);
+ if (after.IsValid()) return after;
}
+ return LifetimePosition::Invalid();
+}
+
- return size;
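+// The allocator can make progress on a range without giving it a register if
+// the range can be spilled at its start or split at a last-resort position.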
+bool IsProgressPossible(const LiveRange* range,
+ const InstructionSequence* code) {
+ return range->CanBeSpilled(range->Start()) ||
+ GetLastResortSplitPosition(range, code).IsValid();
}
+} // namespace
-void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
- allocations_[reg_id]->Insert(range);
- if (range->HasRegisterAssigned()) {
- DCHECK_EQ(reg_id, range->assigned_register());
- return;
- }
- range->set_assigned_register(reg_id);
- range->SetUseHints(reg_id);
- if (range->is_phi()) {
- data()->GetPhiMapValueFor(range->id())->set_assigned_register(reg_id);
- }
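+// Pops and returns the next candidate, in priority order.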
+AllocationCandidate AllocationScheduler::GetNext() {
+ DCHECK(!queue_.empty());
+ AllocationCandidate ret = queue_.top();
+ queue_.pop();
+ return ret;
}
-float GreedyAllocator::CalculateSpillWeight(LiveRange* range) {
- InstructionOperand* first_hint = nullptr;
- if (range->FirstHintPosition() != nullptr) {
- first_hint = range->FirstHintPosition()->operand();
- }
+void AllocationScheduler::Schedule(LiveRange* range) {
+ TRACE("Scheduling live range %d.\n", range->id());
+ queue_.push(AllocationCandidate(range));
+}
- if (range->IsFixed()) return std::numeric_limits<float>::max();
- bool spill;
- if (!FindProgressingSplitPosition(range, &spill).IsValid())
- return std::numeric_limits<float>::max();
+GreedyAllocator::GreedyAllocator(RegisterAllocationData* data,
+ RegisterKind kind, Zone* local_zone)
+ : RegisterAllocator(data, kind),
+ local_zone_(local_zone),
+ allocations_(local_zone),
+ scheduler_(local_zone) {}
- float multiplier = 1.0;
- if (first_hint != nullptr && first_hint->IsRegister()) {
- multiplier = 3.0;
- }
- unsigned use_count = 0;
- auto* pos = range->first_pos();
- while (pos != nullptr) {
- use_count++;
- pos = pos->next();
- }
+void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
+ TRACE("Assigning register %s to live range %d\n", RegisterName(reg_id),
+ range->id());
- unsigned range_size = GetLiveRangeSize(range);
- DCHECK_NE(0U, range_size);
+ DCHECK(!range->HasRegisterAssigned());
- return multiplier * static_cast<float>(use_count) /
- static_cast<float>(range_size);
-}
+ current_allocations(reg_id)->AllocateRange(range);
+ TRACE("Assigning %s to range %d\n", RegisterName(reg_id), range->id());
+ range->set_assigned_register(reg_id);
-float GreedyAllocator::CalculateMaxSpillWeight(
- const ZoneSet<LiveRange*>& ranges) {
- float max = 0.0;
- for (auto* r : ranges) {
- max = std::max(max, CalculateSpillWeight(r));
- }
- return max;
+ DCHECK(current_allocations(reg_id)->VerifyAllocationsAreValid());
}
-void GreedyAllocator::Evict(LiveRange* range) {
- bool removed = allocations_[range->assigned_register()]->Remove(range);
- CHECK(removed);
- range->UnsetUseHints();
- range->UnsetAssignedRegister();
- if (range->is_phi()) {
- data()->GetPhiMapValueFor(range->id())->UnsetAssignedRegister();
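+// Initializes the per-register conflict trackers and records all fixed ranges
+// of the current kind in them, so that later candidates conflict with the
+// fixed register uses.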
+void GreedyAllocator::PreallocateFixedRanges() {
+ allocations_.resize(num_registers());
+ for (int i = 0; i < num_registers(); i++) {
+ allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone());
}
-}
-
-bool GreedyAllocator::TryAllocatePhysicalRegister(
- unsigned reg_id, LiveRange* range, ZoneSet<LiveRange*>* conflicting) {
- auto* segment = range->first_interval();
+ for (LiveRange* fixed_range : GetFixedRegisters()) {
+ if (fixed_range != nullptr) {
+ DCHECK_EQ(mode(), fixed_range->kind());
+ DCHECK(fixed_range->IsFixed());
- auto* alloc_info = allocations_[reg_id];
- while (segment != nullptr) {
- if (auto* existing = alloc_info->Find(segment)) {
- DCHECK(existing->HasRegisterAssigned());
- conflicting->insert(existing);
+ int reg_nr = fixed_range->assigned_register();
+ EnsureValidRangeWeight(fixed_range);
+ current_allocations(reg_nr)->AllocateRange(fixed_range);
}
- segment = segment->next();
}
- if (!conflicting->empty()) return false;
- // No conflicts means we can safely allocate this register to this range.
- AssignRangeToRegister(reg_id, range);
- return true;
}
-int GreedyAllocator::GetHintedRegister(LiveRange* range) {
- int reg;
- if (range->FirstHintPosition(®) != nullptr) {
- return reg;
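+// Queues every processable, not-yet-spilled live range as an allocation
+// candidate.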
+void GreedyAllocator::ScheduleAllocationCandidates() {
+ for (auto range : data()->live_ranges()) {
+ if (CanProcessRange(range) && !range->spilled()) {
+ scheduler().Schedule(range);
+ }
}
- return -1;
}
-bool GreedyAllocator::TryAllocate(LiveRange* current,
- ZoneSet<LiveRange*>* conflicting) {
- if (current->IsFixed()) {
- return TryAllocatePhysicalRegister(current->assigned_register(), current,
- conflicting);
- }
+void GreedyAllocator::TryAllocateCandidate(
+ const AllocationCandidate& candidate) {
+ // At this point, this is just a live range. TODO: groups.
+ TryAllocateLiveRange(candidate.live_range());
+}
- int hinted_reg_id = GetHintedRegister(current);
- if (hinted_reg_id >= 0) {
- if (TryAllocatePhysicalRegister(hinted_reg_id, current, conflicting)) {
- return true;
- }
- }
- ZoneSet<LiveRange*> local_conflicts(local_zone());
- for (unsigned candidate_reg = 0; candidate_reg < allocations_.size();
- candidate_reg++) {
- if (hinted_reg_id >= 0 &&
- candidate_reg == static_cast<size_t>(hinted_reg_id))
- continue;
- local_conflicts.clear();
- if (TryAllocatePhysicalRegister(candidate_reg, current, &local_conflicts)) {
- conflicting->clear();
- return true;
- } else {
- conflicting->insert(local_conflicts.begin(), local_conflicts.end());
- }
- }
- return false;
-}
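+// Tries to give the range a free register; failing that, a register whose
+// conflicts all weigh less than the range and can therefore be evicted;
+// failing that, the range is split or spilled.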
+void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
+ // TODO(mtrofin): once we introduce groups, we'll want to first try and
+ // allocate at the preferred register.
+ TRACE("Attempting to allocate live range %d\n", range->id());
+ int free_reg = -1;
+ int evictable_reg = -1;
+ EnsureValidRangeWeight(range);
+ DCHECK(range->weight() != LiveRange::kInvalidWeight);
+ float smallest_weight = LiveRange::kMaxWeight;
-LiveRange* GreedyAllocator::SpillBetweenUntil(LiveRange* range,
- LifetimePosition start,
- LifetimePosition until,
- LifetimePosition end) {
- CHECK(start < end);
- auto second_part = SplitRangeAt(range, start);
-
- if (second_part->Start() < end) {
- // The split result intersects with [start, end[.
- // Split it at position between ]start+1, end[, spill the middle part
- // and put the rest to unhandled.
- auto third_part_end = end.PrevStart().End();
- if (data()->IsBlockBoundary(end.Start())) {
- third_part_end = end.Start();
+  // Seek the first free register or, failing that, among the registers whose
+  // maximum conflicting weight is below the candidate's weight, the one with
+  // the smallest such weight.
+ for (int i = 0; i < num_registers(); i++) {
+ float max_conflict_weight =
+ current_allocations(i)->GetMaximumConflictingWeight(range);
+ if (max_conflict_weight == LiveRange::kInvalidWeight) {
+ free_reg = i;
+ break;
}
- auto third_part = SplitBetween(
- second_part, Max(second_part->Start().End(), until), third_part_end);
-
- DCHECK(third_part != second_part);
+ if (max_conflict_weight < range->weight() &&
+ max_conflict_weight < smallest_weight) {
+ smallest_weight = max_conflict_weight;
+ evictable_reg = i;
+ }
+ }
- Spill(second_part);
- return third_part;
- } else {
- // The split result does not intersect with [start, end[.
- // Nothing to spill. Just return it for re-processing.
- return second_part;
+ // We have a free register, so we use it.
+ if (free_reg >= 0) {
+ TRACE("Found free register %s for live range %d\n", RegisterName(free_reg),
+ range->id());
+ AssignRangeToRegister(free_reg, range);
+ return;
}
-}
+ // We found a register to perform evictions, so we evict and allocate our
+ // candidate.
+ if (evictable_reg >= 0) {
+ TRACE("Found evictable register %s for live range %d\n",
+ RegisterName(free_reg), range->id());
+ current_allocations(evictable_reg)
+ ->EvictAndRescheduleConflicts(range, &scheduler());
+ AssignRangeToRegister(evictable_reg, range);
+ return;
+ }
-void GreedyAllocator::Enqueue(LiveRange* range) {
- if (range == nullptr || range->IsEmpty()) return;
- unsigned size = GetLiveRangeSize(range);
- TRACE("Enqueuing range %d\n", range->id());
- queue_.push(std::make_pair(size, range));
+ // The range needs to be split or spilled.
+ SplitOrSpillBlockedRange(range);
}
-bool GreedyAllocator::HandleSpillOperands(LiveRange* range) {
- auto position = range->Start();
- TRACE("Processing interval %d start=%d\n", range->id(), position.value());
+void GreedyAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
+ size_t initial_range_count = data()->live_ranges().size();
+ for (size_t i = 0; i < initial_range_count; ++i) {
+ auto range = data()->live_ranges()[i];
+ if (!CanProcessRange(range)) continue;
+ if (range->HasNoSpillType()) continue;
- if (!range->HasNoSpillType()) {
- TRACE("Live range %d already has a spill operand\n", range->id());
- auto next_pos = position;
+ LifetimePosition start = range->Start();
+ TRACE("Live range %d is defined by a spill operand.\n", range->id());
+ auto next_pos = start;
if (next_pos.IsGapPosition()) {
next_pos = next_pos.NextStart();
}
    auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
    // If the range already has a spill operand and it doesn't need a
    // register immediately, split it and spill the first part of the range.
if (pos == nullptr) {
Spill(range);
- return true;
} else if (pos->pos() > range->Start().NextStart()) {
// Do not spill live range eagerly if use position that can benefit from
// the register is too close to the start of live range.
- auto* reminder = SpillBetweenUntil(range, position, position, pos->pos());
- Enqueue(reminder);
- return true;
+ auto split_pos = pos->pos();
+ if (data()->IsBlockBoundary(split_pos.Start())) {
+ split_pos = split_pos.Start();
+ } else {
+ split_pos = split_pos.PrevStart().End();
+ }
+ Split(range, data(), split_pos);
+ Spill(range);
}
}
- return false;
}
void GreedyAllocator::AllocateRegisters() {
- for (auto range : data()->live_ranges()) {
- if (range == nullptr) continue;
- if (range->kind() == mode()) {
- DCHECK(!range->HasRegisterAssigned() && !range->spilled());
- TRACE("Enqueueing live range %d to priority queue \n", range->id());
- Enqueue(range);
- }
- }
+ CHECK(scheduler().empty());
+ CHECK(allocations_.empty());
- allocations_.resize(num_registers());
- for (int i = 0; i < num_registers(); i++) {
- allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone());
- }
+ TRACE("Begin allocating function %s with the Greedy Allocator\n",
+ data()->debug_name());
- for (auto* current : GetFixedRegisters()) {
- if (current != nullptr) {
- DCHECK_EQ(mode(), current->kind());
- int reg_nr = current->assigned_register();
- bool inserted = allocations_[reg_nr]->Insert(current);
- CHECK(inserted);
- }
+ SplitAndSpillRangesDefinedByMemoryOperand();
+ PreallocateFixedRanges();
+ ScheduleAllocationCandidates();
+
+ while (!scheduler().empty()) {
+ AllocationCandidate candidate = scheduler().GetNext();
+ TryAllocateCandidate(candidate);
}
- while (!queue_.empty()) {
- auto current_pair = queue_.top();
- queue_.pop();
- auto current = current_pair.second;
- if (HandleSpillOperands(current)) {
- continue;
- }
- bool spill = false;
- ZoneSet<LiveRange*> conflicting(local_zone());
- if (!TryAllocate(current, &conflicting)) {
- DCHECK(!conflicting.empty());
- float this_weight = std::numeric_limits<float>::max();
- LifetimePosition split_pos =
- FindProgressingSplitPosition(current, &spill);
- if (split_pos.IsValid()) {
- this_weight = CalculateSpillWeight(current);
- }
- bool evicted = false;
- for (auto* conflict : conflicting) {
- if (CalculateSpillWeight(conflict) < this_weight) {
- Evict(conflict);
- Enqueue(conflict);
- evicted = true;
- }
- }
- if (evicted) {
- conflicting.clear();
- TryAllocate(current, &conflicting);
- }
- if (!conflicting.empty()) {
- DCHECK(!current->IsFixed() || current->CanBeSpilled(current->Start()));
- DCHECK(split_pos.IsValid());
- AllocateBlockedRange(current, split_pos, spill);
- }
+ // We do not rely on the hint mechanism used by LinearScan, so no need to
+ // actively update/reset operands until the end.
+ for (auto range : data()->live_ranges()) {
+ if (CanProcessRange(range) && range->HasRegisterAssigned()) {
+ UpdateOperands(range, data());
}
}
for (size_t i = 0; i < allocations_.size(); ++i) {
- if (!allocations_[i]->IsEmpty()) {
+ if (!allocations_[i]->empty()) {
data()->MarkAllocated(mode(), static_cast<int>(i));
}
}
-}
-
+ allocations_.clear();
-LifetimePosition GreedyAllocator::GetSplittablePos(LifetimePosition pos) {
- auto ret = pos.PrevStart().End();
- if (data()->IsBlockBoundary(pos.Start())) {
- ret = pos.Start();
- }
- DCHECK(ret <= pos);
- return ret;
+ TRACE("End allocating function %s with the Greedy Allocator\n",
+ data()->debug_name());
}
-LifetimePosition GreedyAllocator::FindProgressingSplitPosition(
- LiveRange* range, bool* is_spill_pos) {
- auto start = range->Start();
- auto end = range->End();
- UsePosition* next_reg_use = range->first_pos();
- while (next_reg_use != nullptr &&
- next_reg_use->type() != UsePositionType::kRequiresRegister) {
- next_reg_use = next_reg_use->next();
- }
+void GreedyAllocator::EnsureValidRangeWeight(LiveRange* range) {
+  // A range's weight is invalidated when the range is created or split; at all
+  // other times it is kept up to date as the range is allocated or evicted.
+ if (range->weight() != LiveRange::kInvalidWeight) return;
- if (next_reg_use == nullptr) {
- *is_spill_pos = true;
- auto ret = FindOptimalSpillingPos(range, start);
- DCHECK(ret.IsValid());
- return ret;
+ if (range->IsFixed()) {
+ range->set_weight(LiveRange::kMaxWeight);
+ return;
}
-
- *is_spill_pos = false;
- auto reg_pos = next_reg_use->pos();
- auto correct_pos = GetSplittablePos(reg_pos);
- if (start < correct_pos && correct_pos < end) {
- return correct_pos;
+ if (!IsProgressPossible(range, code())) {
+ range->set_weight(LiveRange::kMaxWeight);
+ return;
}
- if (correct_pos >= end) {
- return LifetimePosition::Invalid();
+ float use_count = 0.0;
+ for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) {
+ ++use_count;
}
+ range->set_weight(use_count / static_cast<float>(range->GetSize()));
+}
- // Correct_pos must be at or before start. Find the next use position.
- next_reg_use = next_reg_use->next();
- auto reference = reg_pos;
- while (next_reg_use != nullptr) {
- auto pos = next_reg_use->pos();
- // Skip over tight successive uses.
- if (reference.NextStart() < pos) {
- break;
- }
- reference = pos;
- next_reg_use = next_reg_use->next();
- }
- if (next_reg_use == nullptr) {
- // While there may not be another use, we may still have space in the range
- // to clip off.
- correct_pos = reference.NextStart();
- if (start < correct_pos && correct_pos < end) {
- return correct_pos;
- }
- return LifetimePosition::Invalid();
- }
+void GreedyAllocator::SpillRangeAsLastResort(LiveRange* range) {
+ LifetimePosition start = range->Start();
+ CHECK(range->CanBeSpilled(start));
- correct_pos = GetSplittablePos(next_reg_use->pos());
- if (start < correct_pos && correct_pos < end) {
- DCHECK(reference < correct_pos);
- return correct_pos;
- }
- return LifetimePosition::Invalid();
+ DCHECK(range->NextRegisterPosition(start) == nullptr);
+ Spill(range);
}
-void GreedyAllocator::AllocateBlockedRange(LiveRange* current,
- LifetimePosition pos, bool spill) {
- auto tail = SplitRangeAt(current, pos);
- if (spill) {
- Spill(tail);
- } else {
- Enqueue(tail);
- }
- if (tail != current) {
- Enqueue(current);
+void GreedyAllocator::SplitOrSpillBlockedRange(LiveRange* range) {
+ // TODO(mtrofin): replace the call below with the entry point selecting
+ // heuristics, once they exist, out of which GLRSP is the last one.
+ auto pos = GetLastResortSplitPosition(range, code());
+ if (pos.IsValid()) {
+ LiveRange* tail = Split(range, data(), pos);
+ DCHECK(tail != range);
+ scheduler().Schedule(tail);
+ scheduler().Schedule(range);
+ return;
}
+ SpillRangeAsLastResort(range);
}