[turbofan] Greedy allocator refactoring.
author    mtrofin <mtrofin@chromium.org>
Mon, 29 Jun 2015 15:56:26 +0000 (08:56 -0700)
committer Commit bot <commit-bot@chromium.org>
Mon, 29 Jun 2015 15:56:33 +0000 (15:56 +0000)
Separated the core greedy allocator concepts, exposing the APIs we want to continue working with. In particular, this change completely reworks CoalescedLiveRanges to reflect the fact that a query range may conflict with more than one allocated range, scrapping the structure's initial design. Since this structure is a critical part of the design, the change may be thought of as a full rewrite of the algorithm.
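
For intuition, here is a minimal sketch of the reworked conflict lookup, using
a std::set over plain ints instead of the ZoneSet/AllocatedInterval/
LifetimePosition machinery in the actual change; all names below are
illustrative. Because stored intervals never overlap, once the first conflict
is found, every remaining conflict follows it consecutively:

  #include <iterator>
  #include <set>

  struct Interval {
    int start, end;  // half-open [start, end)
    int range_id;    // owning live range
    bool operator<(const Interval& other) const { return start < other.start; }
  };
  using Storage = std::set<Interval>;

  // First stored interval overlapping [s, e), or storage.end() if none.
  Storage::const_iterator FirstConflict(const Storage& storage, int s, int e) {
    auto it = storage.upper_bound({s, 0, 0});  // first start strictly > s
    if (it != storage.begin()) {
      auto prev = std::prev(it);
      // The interval starting at or before s may still end after s.
      if (prev->end > s) return prev;
    }
    if (it != storage.end() && it->start < e) return it;
    return storage.end();
  }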

Reduced the heuristics to just two essential ones: split "somewhere", which we'll still need when all other heuristics fail; and spill.
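
In outline, the allocation loop now reduces to the sketch below. The two
condensed helpers at the top are illustrative stand-ins for the free-register
search and the weight-based eviction; the remaining names are the methods this
change introduces:

  // Sketch only, not the literal implementation.
  while (!scheduler().empty()) {
    LiveRange* range = scheduler().GetNext().live_range();
    if (TryFreeRegister(range)) continue;           // hypothetical helper
    if (TryEvictCheaperConflicts(range)) continue;  // hypothetical helper
    // Only two heuristics remain once the range is blocked:
    LifetimePosition pos = GetLastResortSplitPosition(range, code());
    if (pos.IsValid()) {
      scheduler().Schedule(Split(range, data(), pos));  // split "somewhere"
      scheduler().Schedule(range);
    } else {
      SpillRangeAsLastResort(range);  // spill
    }
  }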

Introduced a simple splitting primitive: split at GapPosition::START. The goal is to use such primitives to author heuristics quickly and reliably.
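
A sketch of the primitive, assuming V8's LifetimePosition encoding in which
each instruction index carries gap and instruction halves with START/END
slots; the helper name is illustrative. The split position is taken at the
gap START slot, and only when that slot falls strictly inside the range, so
neither resulting piece is empty:

  LifetimePosition GapStartSplitPosition(const LiveRange* range,
                                         int instruction_index) {
    LifetimePosition pos =
        LifetimePosition::GapFromInstructionIndex(instruction_index);
    // Splitting at or outside the range's bounds would create an empty piece.
    if (range->Start() >= pos || pos >= range->End())
      return LifetimePosition::Invalid();
    return pos;
  }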

I expected this primitive to "just work" for any arbitrary instruction index within a live range - e.g. its middle. That's not the case: it upsets execution in certain scenarios. Restricting splits to positions just before or after use positions seems to work. I'm still investigating the source of the failures in the arbitrary-index case.

I intended to document the rationale and prove the soundness of always splitting at START, but I will postpone that until this last remaining issue is resolved.

Review URL: https://codereview.chromium.org/1205173002

Cr-Commit-Position: refs/heads/master@{#29352}

BUILD.gn
src/compiler/coalesced-live-ranges.cc [new file with mode: 0644]
src/compiler/coalesced-live-ranges.h [new file with mode: 0644]
src/compiler/greedy-allocator.cc
src/compiler/greedy-allocator.h
src/compiler/register-allocator.cc
src/compiler/register-allocator.h
tools/gyp/v8.gyp

diff --git a/BUILD.gn b/BUILD.gn
index 47f6772fcccdfbc918dc856bb2a55d1cca400491..5e8166c07230dd6d20a5d24894a0a0d8d96d751a 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -622,6 +622,8 @@ source_set("v8_base") {
     "src/compiler/basic-block-instrumentor.h",
     "src/compiler/change-lowering.cc",
     "src/compiler/change-lowering.h",
+    "src/compiler/coalesced-live-ranges.cc",
+    "src/compiler/coalesced-live-ranges.h",
     "src/compiler/code-generator-impl.h",
     "src/compiler/code-generator.cc",
     "src/compiler/code-generator.h",
diff --git a/src/compiler/coalesced-live-ranges.cc b/src/compiler/coalesced-live-ranges.cc
new file mode 100644
index 0000000..e81f551
--- /dev/null
+++ b/src/compiler/coalesced-live-ranges.cc
@@ -0,0 +1,148 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/coalesced-live-ranges.h"
+#include "src/compiler/greedy-allocator.h"
+#include "src/compiler/register-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define TRACE(...)                             \
+  do {                                         \
+    if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
+  } while (false)
+
+
+const float CoalescedLiveRanges::kAllocatedRangeMultiplier = 10.0;
+
+void CoalescedLiveRanges::AllocateRange(LiveRange* range) {
+  UpdateWeightAtAllocation(range);
+  for (auto interval = range->first_interval(); interval != nullptr;
+       interval = interval->next()) {
+    storage().insert({interval->start(), interval->end(), range});
+  }
+}
+
+
+void CoalescedLiveRanges::Remove(LiveRange* range) {
+  for (auto interval = range->first_interval(); interval != nullptr;
+       interval = interval->next()) {
+    storage().erase({interval->start(), interval->end(), nullptr});
+  }
+  range->UnsetAssignedRegister();
+}
+
+
+float CoalescedLiveRanges::GetMaximumConflictingWeight(
+    const LiveRange* range) const {
+  float ret = LiveRange::kInvalidWeight;
+  auto end = storage().end();
+  for (auto query = range->first_interval(); query != nullptr;
+       query = query->next()) {
+    auto conflict = GetFirstConflict(query);
+
+    if (conflict == end) continue;
+    for (; QueryIntersectsAllocatedInterval(query, conflict); ++conflict) {
+      // It is possible we'll visit the same range multiple times, because
+      // successive (not necessarily consecutive) intervals may belong to the
+      // same range, or because different intervals of the query range may
+      // conflict with the same allocated range.
+      DCHECK_NE(conflict->range->weight(), LiveRange::kInvalidWeight);
+      ret = Max(ret, conflict->range->weight());
+      if (ret == LiveRange::kMaxWeight) break;
+    }
+  }
+  return ret;
+}
+
+
+void CoalescedLiveRanges::EvictAndRescheduleConflicts(
+    LiveRange* range, AllocationScheduler* scheduler) {
+  auto end = storage().end();
+
+  for (auto query = range->first_interval(); query != nullptr;
+       query = query->next()) {
+    auto conflict = GetFirstConflict(query);
+    if (conflict == end) continue;
+    while (QueryIntersectsAllocatedInterval(query, conflict)) {
+      LiveRange* range_to_evict = conflict->range;
+      // Bypass successive intervals belonging to the same range, because we're
+      // about to remove this range, and we don't want the storage iterator to
+      // become invalid.
+      while (conflict != end && conflict->range == range_to_evict) {
+        ++conflict;
+      }
+
+      DCHECK(range_to_evict->HasRegisterAssigned());
+      CHECK(!range_to_evict->IsFixed());
+      Remove(range_to_evict);
+      UpdateWeightAtEviction(range_to_evict);
+      TRACE("Evicted range %d.\n", range_to_evict->id());
+      scheduler->Schedule(range_to_evict);
+    }
+  }
+}
+
+
+bool CoalescedLiveRanges::VerifyAllocationsAreValid() const {
+  LifetimePosition last_end = LifetimePosition::GapFromInstructionIndex(0);
+  for (auto i : storage_) {
+    if (i.start < last_end) {
+      return false;
+    }
+    last_end = i.end;
+  }
+  return true;
+}
+
+
+void CoalescedLiveRanges::UpdateWeightAtAllocation(LiveRange* range) {
+  DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
+  range->set_weight(range->weight() * kAllocatedRangeMultiplier);
+}
+
+
+void CoalescedLiveRanges::UpdateWeightAtEviction(LiveRange* range) {
+  DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
+  range->set_weight(range->weight() / kAllocatedRangeMultiplier);
+}
+
+
+CoalescedLiveRanges::interval_iterator CoalescedLiveRanges::GetFirstConflict(
+    const UseInterval* query) const {
+  DCHECK(query != nullptr);
+  auto end = storage().end();
+  LifetimePosition q_start = query->start();
+  LifetimePosition q_end = query->end();
+
+  if (storage().empty() || storage().rbegin()->end <= q_start ||
+      storage().begin()->start >= q_end) {
+    return end;
+  }
+
+  auto ret = storage().upper_bound(AsAllocatedInterval(q_start));
+  // ret is either at the end (no stored start is strictly greater than
+  // q_start) or at the first interval whose start is strictly greater than
+  // q_start. In either case, the allocated interval just before it may still
+  // intersect our query: either it starts before the query's start but ends
+  // after it, or it starts exactly at the query's start. So unless we're
+  // already at the beginning of the storage - meaning the first allocated
+  // interval starts after this query's start - look at the one behind.
+  if (ret != storage().begin()) {
+    --ret;
+    if (!QueryIntersectsAllocatedInterval(query, ret)) {
+      // The interval behind wasn't intersecting, so move back.
+      ++ret;
+    }
+  }
+  if (ret != end && QueryIntersectsAllocatedInterval(query, ret)) return ret;
+  return end;
+}
+
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/coalesced-live-ranges.h b/src/compiler/coalesced-live-ranges.h
new file mode 100644
index 0000000..f125172
--- /dev/null
+++ b/src/compiler/coalesced-live-ranges.h
@@ -0,0 +1,109 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COALESCED_LIVE_RANGES_H_
+#define V8_COALESCED_LIVE_RANGES_H_
+
+#include "src/compiler/register-allocator.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+class AllocationScheduler;
+
+
+// Collection of live ranges allocated to the same register.
+// It supports efficiently finding all conflicts for a given, non-allocated
+// range. See AllocatedInterval.
+// Allocated live ranges do not intersect. At most, individual use intervals
+// touch. We store, for a live range, an AllocatedInterval corresponding to each
+// of that range's UseIntervals. We keep the list of AllocatedIntervals sorted
+// by start position. Given the non-intersecting property, consecutive
+// AllocatedIntervals then have the property that the earlier one's end is
+// less than or equal to the later one's start.
+// This allows for quick (logarithmic complexity) identification of the first
+// AllocatedInterval to conflict with a given LiveRange, and then for efficient
+// traversal of conflicts.
+class CoalescedLiveRanges : public ZoneObject {
+ public:
+  explicit CoalescedLiveRanges(Zone* zone) : storage_(zone) {}
+  void clear() { storage_.clear(); }
+
+  bool empty() const { return storage_.empty(); }
+
+  // Returns kInvalidWeight if there are no conflicts, or the largest weight of
+  // a range conflicting with the given range.
+  float GetMaximumConflictingWeight(const LiveRange* range) const;
+
+  // Evicts all conflicts of the given range, and reschedules them with the
+  // provided scheduler.
+  void EvictAndRescheduleConflicts(LiveRange* range,
+                                   AllocationScheduler* scheduler);
+
+  // Allocates a range with a pre-calculated candidate weight.
+  void AllocateRange(LiveRange* range);
+
+  // TODO(mtrofin): remove this in favor of comprehensive unit tests.
+  bool VerifyAllocationsAreValid() const;
+
+ private:
+  static const float kAllocatedRangeMultiplier;
+  // Storage detail for CoalescedLiveRanges.
+  struct AllocatedInterval {
+    LifetimePosition start;
+    LifetimePosition end;
+    LiveRange* range;
+    bool operator<(const AllocatedInterval& other) const {
+      return start < other.start;
+    }
+    bool operator>(const AllocatedInterval& other) const {
+      return start > other.start;
+    }
+  };
+  typedef ZoneSet<AllocatedInterval> IntervalStore;
+  typedef IntervalStore::const_iterator interval_iterator;
+
+  IntervalStore& storage() { return storage_; }
+  const IntervalStore& storage() const { return storage_; }
+
+  // Augment the weight of a range that is about to be allocated.
+  static void UpdateWeightAtAllocation(LiveRange* range);
+
+  // Reduce the weight of a range that has lost allocation.
+  static void UpdateWeightAtEviction(LiveRange* range);
+
+  // Intersection utilities.
+  static bool Intersects(LifetimePosition a_start, LifetimePosition a_end,
+                         LifetimePosition b_start, LifetimePosition b_end) {
+    return a_start < b_end && b_start < a_end;
+  }
+  static AllocatedInterval AsAllocatedInterval(LifetimePosition pos) {
+    return {pos, LifetimePosition::Invalid(), nullptr};
+  }
+
+  bool QueryIntersectsAllocatedInterval(const UseInterval* query,
+                                        interval_iterator& pos) const {
+    DCHECK(query != nullptr);
+    return pos != storage().end() &&
+           Intersects(query->start(), query->end(), pos->start, pos->end);
+  }
+
+  void Remove(LiveRange* range);
+
+  // Get the first interval intersecting query. Since the intervals are sorted,
+  // subsequent intervals intersecting query follow.
+  interval_iterator GetFirstConflict(const UseInterval* query) const;
+
+  IntervalStore storage_;
+  DISALLOW_COPY_AND_ASSIGN(CoalescedLiveRanges);
+};
+
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+#endif  // V8_COALESCED_LIVE_RANGES_H_
diff --git a/src/compiler/greedy-allocator.cc b/src/compiler/greedy-allocator.cc
index 0aedb58a2e1be758af50c10d6d12d990aa2dc6ef..8d658c39ff4203bb420e7f184dd46f8634b1900f 100644
--- a/src/compiler/greedy-allocator.cc
+++ b/src/compiler/greedy-allocator.cc
@@ -15,271 +15,228 @@ namespace compiler {
   } while (false)
 
 
-class CoalescedLiveRanges : public ZoneObject {
- public:
-  explicit CoalescedLiveRanges(Zone* zone) : storage_(zone) {}
+namespace {
 
-  LiveRange* Find(UseInterval* query) {
-    ZoneSplayTree<Config>::Locator locator;
 
-    if (storage_.Find(GetKey(query), &locator)) {
-      return locator.value();
-    }
-    return nullptr;
+void UpdateOperands(LiveRange* range, RegisterAllocationData* data) {
+  int reg_id = range->assigned_register();
+  range->SetUseHints(reg_id);
+  if (range->is_phi()) {
+    data->GetPhiMapValueFor(range->id())->set_assigned_register(reg_id);
   }
+}
 
-  // TODO(mtrofin): Change to void returning if we do not care if the interval
-  // was previously added.
-  bool Insert(LiveRange* range) {
-    auto* interval = range->first_interval();
-    while (interval != nullptr) {
-      if (!Insert(interval, range)) return false;
-      interval = interval->next();
-    }
-    return true;
-  }
 
-  bool Remove(LiveRange* range) {
-    bool ret = false;
-    auto* segment = range->first_interval();
-    while (segment != nullptr) {
-      ret |= Remove(segment);
-      segment = segment->next();
-    }
-    return ret;
-  }
+LiveRange* Split(LiveRange* range, RegisterAllocationData* data,
+                 LifetimePosition pos) {
+  DCHECK(range->Start() < pos && pos < range->End());
+  DCHECK(pos.IsStart() || pos.IsGapPosition() ||
+         (data->code()
+              ->GetInstructionBlock(pos.ToInstructionIndex())
+              ->last_instruction_index() != pos.ToInstructionIndex()));
+  LiveRange* result = data->NewChildRangeFor(range);
+  range->SplitAt(pos, result, data->allocation_zone());
+  return result;
+}
 
-  bool IsEmpty() { return storage_.is_empty(); }
-
- private:
-  struct Config {
-    typedef std::pair<int, int> Key;
-    typedef LiveRange* Value;
-    static const Key kNoKey;
-    static Value NoValue() { return nullptr; }
-    static int Compare(const Key& a, const Key& b) {
-      if (a.second <= b.first) return -1;
-      if (a.first >= b.second) return 1;
-      return 0;
-    }
-  };
 
-  Config::Key GetKey(UseInterval* interval) {
-    if (interval == nullptr) return std::make_pair(0, 0);
-    return std::make_pair(interval->start().value(), interval->end().value());
-  }
+// TODO(mtrofin): explain why splitting at gap START is always OK.
+LifetimePosition GetSplitPositionForInstruction(const LiveRange* range,
+                                                const InstructionSequence* code,
+                                                int instruction_index) {
+  LifetimePosition ret = LifetimePosition::Invalid();
 
-  // TODO(mtrofin): Change to void returning if we do not care if the interval
-  // was previously added.
-  bool Insert(UseInterval* interval, LiveRange* range) {
-    ZoneSplayTree<Config>::Locator locator;
-    bool ret = storage_.Insert(GetKey(interval), &locator);
-    if (ret) locator.set_value(range);
-    return ret;
+  ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
+  if (range->Start() >= ret || ret >= range->End()) {
+    return LifetimePosition::Invalid();
   }
+  return ret;
+}
 
-  bool Remove(UseInterval* key) { return storage_.Remove(GetKey(key)); }
 
-  ZoneSplayTree<Config> storage_;
-  DISALLOW_COPY_AND_ASSIGN(CoalescedLiveRanges);
-};
+int GetFirstGapIndex(const UseInterval* interval) {
+  LifetimePosition start = interval->start();
+  int ret = start.ToInstructionIndex();
+  return ret;
+}
 
 
-const std::pair<int, int> CoalescedLiveRanges::Config::kNoKey = {0, 0};
+int GetLastGapIndex(const UseInterval* interval) {
+  LifetimePosition end = interval->end();
+  return end.ToInstructionIndex();
+}
 
-GreedyAllocator::GreedyAllocator(RegisterAllocationData* data,
-                                 RegisterKind kind, Zone* local_zone)
-    : RegisterAllocator(data, kind),
-      local_zone_(local_zone),
-      allocations_(local_zone),
-      queue_(local_zone) {}
 
+// Basic heuristic for advancing the algorithm, used when all other splitting
+// heuristics have failed.
+LifetimePosition GetLastResortSplitPosition(const LiveRange* range,
+                                            const InstructionSequence* code) {
+  if (range->first_interval()->next() != nullptr) {
+    return range->first_interval()->next()->start();
+  }
 
-unsigned GreedyAllocator::GetLiveRangeSize(LiveRange* range) {
-  auto interval = range->first_interval();
-  if (interval == nullptr) return 0;
+  UseInterval* interval = range->first_interval();
+  int first = GetFirstGapIndex(interval);
+  int last = GetLastGapIndex(interval);
+  if (first == last) return LifetimePosition::Invalid();
 
-  unsigned size = 0;
-  while (interval != nullptr) {
-    size += (interval->end().value() - interval->start().value());
-    interval = interval->next();
+  // TODO(mtrofin): determine why we can't just split somewhere arbitrary
+  // within the range, e.g. its middle.
+  for (UsePosition* pos = range->first_pos(); pos != nullptr;
+       pos = pos->next()) {
+    if (pos->type() != UsePositionType::kRequiresRegister) continue;
+    LifetimePosition before = GetSplitPositionForInstruction(
+        range, code, pos->pos().ToInstructionIndex());
+    if (before.IsValid()) return before;
+    LifetimePosition after = GetSplitPositionForInstruction(
+        range, code, pos->pos().ToInstructionIndex() + 1);
+    if (after.IsValid()) return after;
   }
+  return LifetimePosition::Invalid();
+}
+
 
-  return size;
+bool IsProgressPossible(const LiveRange* range,
+                        const InstructionSequence* code) {
+  return range->CanBeSpilled(range->Start()) ||
+         GetLastResortSplitPosition(range, code).IsValid();
 }
+}  // namespace
 
 
-void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
-  allocations_[reg_id]->Insert(range);
-  if (range->HasRegisterAssigned()) {
-    DCHECK_EQ(reg_id, range->assigned_register());
-    return;
-  }
-  range->set_assigned_register(reg_id);
-  range->SetUseHints(reg_id);
-  if (range->is_phi()) {
-    data()->GetPhiMapValueFor(range->id())->set_assigned_register(reg_id);
-  }
+AllocationCandidate AllocationScheduler::GetNext() {
+  DCHECK(!queue_.empty());
+  AllocationCandidate ret = queue_.top();
+  queue_.pop();
+  return ret;
 }
 
 
-float GreedyAllocator::CalculateSpillWeight(LiveRange* range) {
-  InstructionOperand* first_hint = nullptr;
-  if (range->FirstHintPosition() != nullptr) {
-    first_hint = range->FirstHintPosition()->operand();
-  }
+void AllocationScheduler::Schedule(LiveRange* range) {
+  TRACE("Scheduling live range %d.\n", range->id());
+  queue_.push(AllocationCandidate(range));
+}
 
-  if (range->IsFixed()) return std::numeric_limits<float>::max();
-  bool spill;
-  if (!FindProgressingSplitPosition(range, &spill).IsValid())
-    return std::numeric_limits<float>::max();
+GreedyAllocator::GreedyAllocator(RegisterAllocationData* data,
+                                 RegisterKind kind, Zone* local_zone)
+    : RegisterAllocator(data, kind),
+      local_zone_(local_zone),
+      allocations_(local_zone),
+      scheduler_(local_zone) {}
 
-  float multiplier = 1.0;
-  if (first_hint != nullptr && first_hint->IsRegister()) {
-    multiplier = 3.0;
-  }
 
-  unsigned use_count = 0;
-  auto* pos = range->first_pos();
-  while (pos != nullptr) {
-    use_count++;
-    pos = pos->next();
-  }
+void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
+  TRACE("Assigning register %s to live range %d\n", RegisterName(reg_id),
+        range->id());
 
-  unsigned range_size = GetLiveRangeSize(range);
-  DCHECK_NE(0U, range_size);
+  DCHECK(!range->HasRegisterAssigned());
 
-  return multiplier * static_cast<float>(use_count) /
-         static_cast<float>(range_size);
-}
+  current_allocations(reg_id)->AllocateRange(range);
 
+  TRACE("Assigning %s to range %d\n", RegisterName(reg_id), range->id());
+  range->set_assigned_register(reg_id);
 
-float GreedyAllocator::CalculateMaxSpillWeight(
-    const ZoneSet<LiveRange*>& ranges) {
-  float max = 0.0;
-  for (auto* r : ranges) {
-    max = std::max(max, CalculateSpillWeight(r));
-  }
-  return max;
+  DCHECK(current_allocations(reg_id)->VerifyAllocationsAreValid());
 }
 
 
-void GreedyAllocator::Evict(LiveRange* range) {
-  bool removed = allocations_[range->assigned_register()]->Remove(range);
-  CHECK(removed);
-  range->UnsetUseHints();
-  range->UnsetAssignedRegister();
-  if (range->is_phi()) {
-    data()->GetPhiMapValueFor(range->id())->UnsetAssignedRegister();
+void GreedyAllocator::PreallocateFixedRanges() {
+  allocations_.resize(num_registers());
+  for (int i = 0; i < num_registers(); i++) {
+    allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone());
   }
-}
-
 
-bool GreedyAllocator::TryAllocatePhysicalRegister(
-    unsigned reg_id, LiveRange* range, ZoneSet<LiveRange*>* conflicting) {
-  auto* segment = range->first_interval();
+  for (LiveRange* fixed_range : GetFixedRegisters()) {
+    if (fixed_range != nullptr) {
+      DCHECK_EQ(mode(), fixed_range->kind());
+      DCHECK(fixed_range->IsFixed());
 
-  auto* alloc_info = allocations_[reg_id];
-  while (segment != nullptr) {
-    if (auto* existing = alloc_info->Find(segment)) {
-      DCHECK(existing->HasRegisterAssigned());
-      conflicting->insert(existing);
+      int reg_nr = fixed_range->assigned_register();
+      EnsureValidRangeWeight(fixed_range);
+      current_allocations(reg_nr)->AllocateRange(fixed_range);
     }
-    segment = segment->next();
   }
-  if (!conflicting->empty()) return false;
-  // No conflicts means we can safely allocate this register to this range.
-  AssignRangeToRegister(reg_id, range);
-  return true;
 }
 
 
-int GreedyAllocator::GetHintedRegister(LiveRange* range) {
-  int reg;
-  if (range->FirstHintPosition(&reg) != nullptr) {
-    return reg;
+void GreedyAllocator::ScheduleAllocationCandidates() {
+  for (auto range : data()->live_ranges()) {
+    if (CanProcessRange(range) && !range->spilled()) {
+      scheduler().Schedule(range);
+    }
   }
-  return -1;
 }
 
 
-bool GreedyAllocator::TryAllocate(LiveRange* current,
-                                  ZoneSet<LiveRange*>* conflicting) {
-  if (current->IsFixed()) {
-    return TryAllocatePhysicalRegister(current->assigned_register(), current,
-                                       conflicting);
-  }
+void GreedyAllocator::TryAllocateCandidate(
+    const AllocationCandidate& candidate) {
+  // At this point, the candidate is just a live range. TODO(mtrofin): groups.
+  TryAllocateLiveRange(candidate.live_range());
+}
 
-  int hinted_reg_id = GetHintedRegister(current);
-  if (hinted_reg_id >= 0) {
-    if (TryAllocatePhysicalRegister(hinted_reg_id, current, conflicting)) {
-      return true;
-    }
-  }
 
-  ZoneSet<LiveRange*> local_conflicts(local_zone());
-  for (unsigned candidate_reg = 0; candidate_reg < allocations_.size();
-       candidate_reg++) {
-    if (hinted_reg_id >= 0 &&
-        candidate_reg == static_cast<size_t>(hinted_reg_id))
-      continue;
-    local_conflicts.clear();
-    if (TryAllocatePhysicalRegister(candidate_reg, current, &local_conflicts)) {
-      conflicting->clear();
-      return true;
-    } else {
-      conflicting->insert(local_conflicts.begin(), local_conflicts.end());
-    }
-  }
-  return false;
-}
+void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
+  // TODO(mtrofin): once we introduce groups, we'll want to first try and
+  // allocate at the preferred register.
+  TRACE("Attempting to allocate live range %d\n", range->id());
+  int free_reg = -1;
+  int evictable_reg = -1;
+  EnsureValidRangeWeight(range);
+  DCHECK(range->weight() != LiveRange::kInvalidWeight);
 
+  float smallest_weight = LiveRange::kMaxWeight;
 
-LiveRange* GreedyAllocator::SpillBetweenUntil(LiveRange* range,
-                                              LifetimePosition start,
-                                              LifetimePosition until,
-                                              LifetimePosition end) {
-  CHECK(start < end);
-  auto second_part = SplitRangeAt(range, start);
-
-  if (second_part->Start() < end) {
-    // The split result intersects with [start, end[.
-    // Split it at position between ]start+1, end[, spill the middle part
-    // and put the rest to unhandled.
-    auto third_part_end = end.PrevStart().End();
-    if (data()->IsBlockBoundary(end.Start())) {
-      third_part_end = end.Start();
+  // Seek either the first free register, or, from the set of registers
+  // where the maximum conflict is lower than the candidate's weight, the one
+  // with the smallest such weight.
+  for (int i = 0; i < num_registers(); i++) {
+    float max_conflict_weight =
+        current_allocations(i)->GetMaximumConflictingWeight(range);
+    if (max_conflict_weight == LiveRange::kInvalidWeight) {
+      free_reg = i;
+      break;
     }
-    auto third_part = SplitBetween(
-        second_part, Max(second_part->Start().End(), until), third_part_end);
-
-    DCHECK(third_part != second_part);
+    if (max_conflict_weight < range->weight() &&
+        max_conflict_weight < smallest_weight) {
+      smallest_weight = max_conflict_weight;
+      evictable_reg = i;
+    }
+  }
 
-    Spill(second_part);
-    return third_part;
-  } else {
-    // The split result does not intersect with [start, end[.
-    // Nothing to spill. Just return it for re-processing.
-    return second_part;
+  // We have a free register, so we use it.
+  if (free_reg >= 0) {
+    TRACE("Found free register %s for live range %d\n", RegisterName(free_reg),
+          range->id());
+    AssignRangeToRegister(free_reg, range);
+    return;
   }
-}
 
+  // We found a register to perform evictions, so we evict and allocate our
+  // candidate.
+  if (evictable_reg >= 0) {
+    TRACE("Found evictable register %s for live range %d\n",
+          RegisterName(evictable_reg), range->id());
+    current_allocations(evictable_reg)
+        ->EvictAndRescheduleConflicts(range, &scheduler());
+    AssignRangeToRegister(evictable_reg, range);
+    return;
+  }
 
-void GreedyAllocator::Enqueue(LiveRange* range) {
-  if (range == nullptr || range->IsEmpty()) return;
-  unsigned size = GetLiveRangeSize(range);
-  TRACE("Enqueuing range %d\n", range->id());
-  queue_.push(std::make_pair(size, range));
+  // The range needs to be split or spilled.
+  SplitOrSpillBlockedRange(range);
 }
 
 
-bool GreedyAllocator::HandleSpillOperands(LiveRange* range) {
-  auto position = range->Start();
-  TRACE("Processing interval %d start=%d\n", range->id(), position.value());
+void GreedyAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
+  size_t initial_range_count = data()->live_ranges().size();
+  for (size_t i = 0; i < initial_range_count; ++i) {
+    auto range = data()->live_ranges()[i];
+    if (!CanProcessRange(range)) continue;
+    if (range->HasNoSpillType()) continue;
 
-  if (!range->HasNoSpillType()) {
-    TRACE("Live range %d already has a spill operand\n", range->id());
-    auto next_pos = position;
+    LifetimePosition start = range->Start();
+    TRACE("Live range %d is defined by a spill operand.\n", range->id());
+    auto next_pos = start;
     if (next_pos.IsGapPosition()) {
       next_pos = next_pos.NextStart();
     }
@@ -288,170 +245,103 @@ bool GreedyAllocator::HandleSpillOperands(LiveRange* range) {
     // register immediately, split it and spill the first part of the range.
     if (pos == nullptr) {
       Spill(range);
-      return true;
     } else if (pos->pos() > range->Start().NextStart()) {
       // Do not spill live range eagerly if use position that can benefit from
       // the register is too close to the start of live range.
-      auto* reminder = SpillBetweenUntil(range, position, position, pos->pos());
-      Enqueue(reminder);
-      return true;
+      auto split_pos = pos->pos();
+      if (data()->IsBlockBoundary(split_pos.Start())) {
+        split_pos = split_pos.Start();
+      } else {
+        split_pos = split_pos.PrevStart().End();
+      }
+      Split(range, data(), split_pos);
+      Spill(range);
     }
   }
-  return false;
 }
 
 
 void GreedyAllocator::AllocateRegisters() {
-  for (auto range : data()->live_ranges()) {
-    if (range == nullptr) continue;
-    if (range->kind() == mode()) {
-      DCHECK(!range->HasRegisterAssigned() && !range->spilled());
-      TRACE("Enqueueing live range %d to priority queue \n", range->id());
-      Enqueue(range);
-    }
-  }
+  CHECK(scheduler().empty());
+  CHECK(allocations_.empty());
 
-  allocations_.resize(num_registers());
-  for (int i = 0; i < num_registers(); i++) {
-    allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone());
-  }
+  TRACE("Begin allocating function %s with the Greedy Allocator\n",
+        data()->debug_name());
 
-  for (auto* current : GetFixedRegisters()) {
-    if (current != nullptr) {
-      DCHECK_EQ(mode(), current->kind());
-      int reg_nr = current->assigned_register();
-      bool inserted = allocations_[reg_nr]->Insert(current);
-      CHECK(inserted);
-    }
+  SplitAndSpillRangesDefinedByMemoryOperand();
+  PreallocateFixedRanges();
+  ScheduleAllocationCandidates();
+
+  while (!scheduler().empty()) {
+    AllocationCandidate candidate = scheduler().GetNext();
+    TryAllocateCandidate(candidate);
   }
 
-  while (!queue_.empty()) {
-    auto current_pair = queue_.top();
-    queue_.pop();
-    auto current = current_pair.second;
-    if (HandleSpillOperands(current)) {
-      continue;
-    }
-    bool spill = false;
-    ZoneSet<LiveRange*> conflicting(local_zone());
-    if (!TryAllocate(current, &conflicting)) {
-      DCHECK(!conflicting.empty());
-      float this_weight = std::numeric_limits<float>::max();
-      LifetimePosition split_pos =
-          FindProgressingSplitPosition(current, &spill);
-      if (split_pos.IsValid()) {
-        this_weight = CalculateSpillWeight(current);
-      }
 
-      bool evicted = false;
-      for (auto* conflict : conflicting) {
-        if (CalculateSpillWeight(conflict) < this_weight) {
-          Evict(conflict);
-          Enqueue(conflict);
-          evicted = true;
-        }
-      }
-      if (evicted) {
-        conflicting.clear();
-        TryAllocate(current, &conflicting);
-      }
-      if (!conflicting.empty()) {
-        DCHECK(!current->IsFixed() || current->CanBeSpilled(current->Start()));
-        DCHECK(split_pos.IsValid());
-        AllocateBlockedRange(current, split_pos, spill);
-      }
+  // We do not rely on the hint mechanism used by LinearScan, so no need to
+  // actively update/reset operands until the end.
+  for (auto range : data()->live_ranges()) {
+    if (CanProcessRange(range) && range->HasRegisterAssigned()) {
+      UpdateOperands(range, data());
     }
   }
 
   for (size_t i = 0; i < allocations_.size(); ++i) {
-    if (!allocations_[i]->IsEmpty()) {
+    if (!allocations_[i]->empty()) {
       data()->MarkAllocated(mode(), static_cast<int>(i));
     }
   }
-}
-
+  allocations_.clear();
 
-LifetimePosition GreedyAllocator::GetSplittablePos(LifetimePosition pos) {
-  auto ret = pos.PrevStart().End();
-  if (data()->IsBlockBoundary(pos.Start())) {
-    ret = pos.Start();
-  }
-  DCHECK(ret <= pos);
-  return ret;
+  TRACE("End allocating function %s with the Greedy Allocator\n",
+        data()->debug_name());
 }
 
-LifetimePosition GreedyAllocator::FindProgressingSplitPosition(
-    LiveRange* range, bool* is_spill_pos) {
-  auto start = range->Start();
-  auto end = range->End();
 
-  UsePosition* next_reg_use = range->first_pos();
-  while (next_reg_use != nullptr &&
-         next_reg_use->type() != UsePositionType::kRequiresRegister) {
-    next_reg_use = next_reg_use->next();
-  }
+void GreedyAllocator::EnsureValidRangeWeight(LiveRange* range) {
+  // The live range weight will be invalidated when ranges are created or split.
+  // Otherwise, it is consistently updated when the range is allocated or
+  // unallocated.
+  if (range->weight() != LiveRange::kInvalidWeight) return;
 
-  if (next_reg_use == nullptr) {
-    *is_spill_pos = true;
-    auto ret = FindOptimalSpillingPos(range, start);
-    DCHECK(ret.IsValid());
-    return ret;
+  if (range->IsFixed()) {
+    range->set_weight(LiveRange::kMaxWeight);
+    return;
   }
-
-  *is_spill_pos = false;
-  auto reg_pos = next_reg_use->pos();
-  auto correct_pos = GetSplittablePos(reg_pos);
-  if (start < correct_pos && correct_pos < end) {
-    return correct_pos;
+  if (!IsProgressPossible(range, code())) {
+    range->set_weight(LiveRange::kMaxWeight);
+    return;
   }
 
-  if (correct_pos >= end) {
-    return LifetimePosition::Invalid();
+  float use_count = 0.0;
+  for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) {
+    ++use_count;
   }
+  range->set_weight(use_count / static_cast<float>(range->GetSize()));
+}
 
-  // Correct_pos must be at or before start. Find the next use position.
-  next_reg_use = next_reg_use->next();
-  auto reference = reg_pos;
-  while (next_reg_use != nullptr) {
-    auto pos = next_reg_use->pos();
-    // Skip over tight successive uses.
-    if (reference.NextStart() < pos) {
-      break;
-    }
-    reference = pos;
-    next_reg_use = next_reg_use->next();
-  }
 
-  if (next_reg_use == nullptr) {
-    // While there may not be another use, we may still have space in the range
-    // to clip off.
-    correct_pos = reference.NextStart();
-    if (start < correct_pos && correct_pos < end) {
-      return correct_pos;
-    }
-    return LifetimePosition::Invalid();
-  }
+void GreedyAllocator::SpillRangeAsLastResort(LiveRange* range) {
+  LifetimePosition start = range->Start();
+  CHECK(range->CanBeSpilled(start));
 
-  correct_pos = GetSplittablePos(next_reg_use->pos());
-  if (start < correct_pos && correct_pos < end) {
-    DCHECK(reference < correct_pos);
-    return correct_pos;
-  }
-  return LifetimePosition::Invalid();
+  DCHECK(range->NextRegisterPosition(start) == nullptr);
+  Spill(range);
 }
 
 
-void GreedyAllocator::AllocateBlockedRange(LiveRange* current,
-                                           LifetimePosition pos, bool spill) {
-  auto tail = SplitRangeAt(current, pos);
-  if (spill) {
-    Spill(tail);
-  } else {
-    Enqueue(tail);
-  }
-  if (tail != current) {
-    Enqueue(current);
+void GreedyAllocator::SplitOrSpillBlockedRange(LiveRange* range) {
+  // TODO(mtrofin): replace the call below with the entry point selecting
+  // heuristics, once they exist, out of which GLRSP is the last one.
+  auto pos = GetLastResortSplitPosition(range, code());
+  if (pos.IsValid()) {
+    LiveRange* tail = Split(range, data(), pos);
+    DCHECK(tail != range);
+    scheduler().Schedule(tail);
+    scheduler().Schedule(range);
+    return;
   }
+  SpillRangeAsLastResort(range);
 }
 
 
diff --git a/src/compiler/greedy-allocator.h b/src/compiler/greedy-allocator.h
index 5bc7ce343d778dbd2126bb4adb0d6ecf1e4ade27..3ec180b2ba5d0dc82aa732e1ac23e0140358d661 100644
--- a/src/compiler/greedy-allocator.h
+++ b/src/compiler/greedy-allocator.h
@@ -5,6 +5,7 @@
 #ifndef V8_GREEDY_ALLOCATOR_H_
 #define V8_GREEDY_ALLOCATOR_H_
 
+#include "src/compiler/coalesced-live-ranges.h"
 #include "src/compiler/register-allocator.h"
 #include "src/zone-containers.h"
 
@@ -12,7 +13,43 @@ namespace v8 {
 namespace internal {
 namespace compiler {
 
-class CoalescedLiveRanges;
+
+// The object of allocation scheduling. At minimum, this is a LiveRange, but
+// we may extend this to groups of LiveRanges. It has to be comparable.
+class AllocationCandidate {
+ public:
+  explicit AllocationCandidate(LiveRange* range) : range_(range) {}
+
+  // Strict ordering operators
+  bool operator<(const AllocationCandidate& other) const {
+    return range_->GetSize() < other.range_->GetSize();
+  }
+
+  bool operator>(const AllocationCandidate& other) const {
+    return range_->GetSize() > other.range_->GetSize();
+  }
+
+  LiveRange* live_range() const { return range_; }
+
+ private:
+  LiveRange* range_;
+};
+
+
+// Schedule processing (allocating) of AllocationCandidates.
+class AllocationScheduler final : ZoneObject {
+ public:
+  explicit AllocationScheduler(Zone* zone) : queue_(zone) {}
+  void Schedule(LiveRange* range);
+  AllocationCandidate GetNext();
+  bool empty() const { return queue_.empty(); }
+
+ private:
+  typedef ZonePriorityQueue<AllocationCandidate> ScheduleQueue;
+  ScheduleQueue queue_;
+
+  DISALLOW_COPY_AND_ASSIGN(AllocationScheduler);
+};
 
 
 // A variant of the LLVM Greedy Register Allocator. See
@@ -25,39 +62,47 @@ class GreedyAllocator final : public RegisterAllocator {
   void AllocateRegisters();
 
  private:
-  LifetimePosition GetSplittablePos(LifetimePosition pos);
-  const RegisterConfiguration* config() const { return data()->config(); }
+  AllocationScheduler& scheduler() { return scheduler_; }
+  CoalescedLiveRanges* current_allocations(unsigned i) {
+    return allocations_[i];
+  }
   Zone* local_zone() const { return local_zone_; }
 
-  int GetHintedRegister(LiveRange* range);
+  // Insert fixed ranges.
+  void PreallocateFixedRanges();
 
-  typedef ZonePriorityQueue<std::pair<unsigned, LiveRange*>> PQueue;
+  // Schedule unassigned live ranges for allocation.
+  // TODO(mtrofin): groups.
+  void ScheduleAllocationCandidates();
 
-  unsigned GetLiveRangeSize(LiveRange* range);
-  void Enqueue(LiveRange* range);
+  // Find the optimal split for ranges defined by a memory operand, e.g.
+  // constants or function parameters passed on the stack.
+  void SplitAndSpillRangesDefinedByMemoryOperand();
 
-  void Evict(LiveRange* range);
-  float CalculateSpillWeight(LiveRange* range);
-  float CalculateMaxSpillWeight(const ZoneSet<LiveRange*>& ranges);
+  void TryAllocateCandidate(const AllocationCandidate& candidate);
+  void TryAllocateLiveRange(LiveRange* range);
 
+  bool CanProcessRange(LiveRange* range) const {
+    return range != nullptr && !range->IsEmpty() && range->kind() == mode();
+  }
 
-  bool TryAllocate(LiveRange* current, ZoneSet<LiveRange*>* conflicting);
-  bool TryAllocatePhysicalRegister(unsigned reg_id, LiveRange* range,
-                                   ZoneSet<LiveRange*>* conflicting);
-  bool HandleSpillOperands(LiveRange* range);
-  void AllocateBlockedRange(LiveRange* current, LifetimePosition pos,
-                            bool spill);
+  // Calculate the weight of a candidate for allocation.
+  void EnsureValidRangeWeight(LiveRange* range);
 
-  LiveRange* SpillBetweenUntil(LiveRange* range, LifetimePosition start,
-                               LifetimePosition until, LifetimePosition end);
-  void AssignRangeToRegister(int reg_id, LiveRange* range);
+  // Calculate the new weight of a range that is about to be allocated.
+  float GetAllocatedRangeWeight(float candidate_weight);
+
+  // This is the extension point for splitting heuristics.
+  void SplitOrSpillBlockedRange(LiveRange* range);
 
-  LifetimePosition FindProgressingSplitPosition(LiveRange* range,
-                                                bool* is_spill_pos);
+  // Necessary heuristic: spill when all else fails.
+  void SpillRangeAsLastResort(LiveRange* range);
+
+  void AssignRangeToRegister(int reg_id, LiveRange* range);
 
   Zone* local_zone_;
   ZoneVector<CoalescedLiveRanges*> allocations_;
-  PQueue queue_;
+  AllocationScheduler scheduler_;
   DISALLOW_COPY_AND_ASSIGN(GreedyAllocator);
 };
 }  // namespace compiler
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 0a6f16072d433fd7f73c977499e3464a6f92a3fd..5bf858a86cf6e7551ce7a033603cc7c17990cbe2 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -237,6 +237,10 @@ struct LiveRange::SpillAtDefinitionList : ZoneObject {
 };
 
 
+const float LiveRange::kInvalidWeight = -1;
+const float LiveRange::kMaxWeight = std::numeric_limits<float>::max();
+
+
 LiveRange::LiveRange(int id, MachineType machine_type)
     : id_(id),
       spill_start_index_(kMaxInt),
@@ -250,7 +254,9 @@ LiveRange::LiveRange(int id, MachineType machine_type)
       spills_at_definition_(nullptr),
       current_interval_(nullptr),
       last_processed_use_(nullptr),
-      current_hint_position_(nullptr) {
+      current_hint_position_(nullptr),
+      size_(kInvalidSize),
+      weight_(kInvalidWeight) {
   DCHECK(AllocatedOperand::IsSupportedMachineType(machine_type));
   bits_ = SpillTypeField::encode(SpillType::kNoSpillType) |
           AssignedRegisterField::encode(kUnassignedRegister) |
@@ -559,6 +565,10 @@ void LiveRange::SplitAt(LifetimePosition position, LiveRange* result,
   result->next_ = next_;
   next_ = result;
 
+  // Invalidate size and weight of this range. The child range has them
+  // invalid at construction.
+  size_ = kInvalidSize;
+  weight_ = kInvalidWeight;
 #ifdef DEBUG
   Verify();
   result->Verify();
@@ -749,6 +759,19 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const {
 }
 
 
+unsigned LiveRange::GetSize() {
+  if (size_ == kInvalidSize) {
+    size_ = 0;
+    for (auto interval = first_interval(); interval != nullptr;
+         interval = interval->next()) {
+      size_ += (interval->end().value() - interval->start().value());
+    }
+  }
+
+  return static_cast<unsigned>(size_);
+}
+
+
 static bool AreUseIntervalsIntersecting(UseInterval* interval1,
                                         UseInterval* interval2) {
   while (interval1 != nullptr && interval2 != nullptr) {
@@ -1852,6 +1875,15 @@ const ZoneVector<LiveRange*>& RegisterAllocator::GetFixedRegisters() const {
 }
 
 
+const char* RegisterAllocator::RegisterName(int allocation_index) const {
+  if (mode() == GENERAL_REGISTERS) {
+    return data()->config()->general_register_name(allocation_index);
+  } else {
+    return data()->config()->double_register_name(allocation_index);
+  }
+}
+
+
 LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
                                          RegisterKind kind, Zone* local_zone)
     : RegisterAllocator(data, kind),
@@ -1958,15 +1990,6 @@ void LinearScanAllocator::AllocateRegisters() {
 }
 
 
-const char* LinearScanAllocator::RegisterName(int allocation_index) const {
-  if (mode() == GENERAL_REGISTERS) {
-    return data()->config()->general_register_name(allocation_index);
-  } else {
-    return data()->config()->double_register_name(allocation_index);
-  }
-}
-
-
 void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
                                                        int reg) {
   data()->MarkAllocated(range->kind(), reg);
diff --git a/src/compiler/register-allocator.h b/src/compiler/register-allocator.h
index 5a09c012d8234f42fb24db895130328b9be35b81..83f95cbac601608fe178fbcb7880bf77ad87191c 100644
--- a/src/compiler/register-allocator.h
+++ b/src/compiler/register-allocator.h
@@ -428,6 +428,15 @@ class LiveRange final : public ZoneObject {
     return spills_at_definition_;
   }
 
+  // Used solely by the Greedy Allocator:
+  unsigned GetSize();
+  float weight() const { return weight_; }
+  void set_weight(float weight) { weight_ = weight; }
+
+  static const int kInvalidSize = -1;
+  static const float kInvalidWeight;
+  static const float kMaxWeight;
+
  private:
   void set_spill_type(SpillType value) {
     bits_ = SpillTypeField::update(bits_, value);
@@ -468,6 +477,14 @@ class LiveRange final : public ZoneObject {
   // This is used as a cache, it's invalid outside of BuildLiveRanges.
   mutable UsePosition* current_hint_position_;
 
+  // greedy: the number of LifetimePositions covered by this range. Used to
+  // prioritize selecting live ranges for register assignment, as well as
+  // in weight calculations.
+  int size_;
+
+  // greedy: a metric for resolving conflicts between ranges with an assigned
+  // register and ranges that intersect them and need a register.
+  float weight_;
   DISALLOW_COPY_AND_ASSIGN(LiveRange);
 };
 
@@ -770,6 +787,7 @@ class RegisterAllocator : public ZoneObject {
                                           LifetimePosition pos);
 
   const ZoneVector<LiveRange*>& GetFixedRegisters() const;
+  const char* RegisterName(int allocation_index) const;
 
  private:
   RegisterAllocationData* const data_;
@@ -789,8 +807,6 @@ class LinearScanAllocator final : public RegisterAllocator {
   void AllocateRegisters();
 
  private:
-  const char* RegisterName(int allocation_index) const;
-
   ZoneVector<LiveRange*>& unhandled_live_ranges() {
     return unhandled_live_ranges_;
   }
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 98149b753c9a42e8b28cb7920e6ddb0376a3e9f3..22a301c1ed4db41fd0873e248ed417605faa2f49 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
         '../../src/compiler/basic-block-instrumentor.h',
         '../../src/compiler/change-lowering.cc',
         '../../src/compiler/change-lowering.h',
+        '../../src/compiler/coalesced-live-ranges.cc',
+        '../../src/compiler/coalesced-live-ranges.h',
         '../../src/compiler/code-generator-impl.h',
         '../../src/compiler/code-generator.cc',
         '../../src/compiler/code-generator.h',