Separated the new register allocator into its own files.
author mtrofin <mtrofin@chromium.org>
Tue, 16 Jun 2015 17:10:14 +0000 (10:10 -0700)
committer Commit bot <commit-bot@chromium.org>
Tue, 16 Jun 2015 17:10:31 +0000 (17:10 +0000)
Opportunistically removed GreedyAllocator::TryReuseSpillForPhi because it is unsuitable for Greedy: it was copied from Linear and relies on hints, but the current implementation of hints assumes linear scan.

This change doesn't aim to address performance or correctness for Greedy.

BUG=

Review URL: https://codereview.chromium.org/1184183002

Cr-Commit-Position: refs/heads/master@{#29054}

BUILD.gn
src/compiler/greedy-allocator.cc [new file with mode: 0644]
src/compiler/greedy-allocator.h [new file with mode: 0644]
src/compiler/pipeline.cc
src/compiler/register-allocator.cc
src/compiler/register-allocator.h
tools/gyp/v8.gyp

diff --git a/BUILD.gn b/BUILD.gn
index 02d60bd189c4ee541f89a8b70b017a3e81ccb047..3e652cf7c396894b3c89becace141977ce8528b3 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -646,6 +646,8 @@ source_set("v8_base") {
     "src/compiler/graph-visualizer.h",
     "src/compiler/graph.cc",
     "src/compiler/graph.h",
+    "src/compiler/greedy-allocator.cc",
+    "src/compiler/greedy-allocator.h",
     "src/compiler/instruction-codes.h",
     "src/compiler/instruction-selector-impl.h",
     "src/compiler/instruction-selector.cc",
diff --git a/src/compiler/greedy-allocator.cc b/src/compiler/greedy-allocator.cc
new file mode 100644
index 0000000..0aedb58
--- /dev/null
+++ b/src/compiler/greedy-allocator.cc
@@ -0,0 +1,496 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/greedy-allocator.h"
+#include "src/compiler/register-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define TRACE(...)                             \
+  do {                                         \
+    if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
+  } while (false)
+
+
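+// The set of live ranges assigned to one physical register, keyed by their
+// use intervals in a splay tree so that an interval overlapping a query
+// interval can be found in amortized logarithmic time.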
+class CoalescedLiveRanges : public ZoneObject {
+ public:
+  explicit CoalescedLiveRanges(Zone* zone) : storage_(zone) {}
+
+  LiveRange* Find(UseInterval* query) {
+    ZoneSplayTree<Config>::Locator locator;
+
+    if (storage_.Find(GetKey(query), &locator)) {
+      return locator.value();
+    }
+    return nullptr;
+  }
+
+  // TODO(mtrofin): Change to void returning if we do not care if the interval
+  // was previously added.
+  bool Insert(LiveRange* range) {
+    auto* interval = range->first_interval();
+    while (interval != nullptr) {
+      if (!Insert(interval, range)) return false;
+      interval = interval->next();
+    }
+    return true;
+  }
+
+  bool Remove(LiveRange* range) {
+    bool ret = false;
+    auto* segment = range->first_interval();
+    while (segment != nullptr) {
+      ret |= Remove(segment);
+      segment = segment->next();
+    }
+    return ret;
+  }
+
+  bool IsEmpty() { return storage_.is_empty(); }
+
+ private:
+  struct Config {
+    typedef std::pair<int, int> Key;
+    typedef LiveRange* Value;
+    static const Key kNoKey;
+    static Value NoValue() { return nullptr; }
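+    // Keys are [start, end) endpoint pairs. Disjoint intervals are ordered
+    // by position; overlapping intervals compare equal, so Find returns a
+    // conflicting range when one exists.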
+    static int Compare(const Key& a, const Key& b) {
+      if (a.second <= b.first) return -1;
+      if (a.first >= b.second) return 1;
+      return 0;
+    }
+  };
+
+  Config::Key GetKey(UseInterval* interval) {
+    if (interval == nullptr) return std::make_pair(0, 0);
+    return std::make_pair(interval->start().value(), interval->end().value());
+  }
+
+  // TODO(mtrofin): Change to void returning if we do not care if the interval
+  // was previously added.
+  bool Insert(UseInterval* interval, LiveRange* range) {
+    ZoneSplayTree<Config>::Locator locator;
+    bool ret = storage_.Insert(GetKey(interval), &locator);
+    if (ret) locator.set_value(range);
+    return ret;
+  }
+
+  bool Remove(UseInterval* key) { return storage_.Remove(GetKey(key)); }
+
+  ZoneSplayTree<Config> storage_;
+  DISALLOW_COPY_AND_ASSIGN(CoalescedLiveRanges);
+};
+
+
+const std::pair<int, int> CoalescedLiveRanges::Config::kNoKey = {0, 0};
+
+GreedyAllocator::GreedyAllocator(RegisterAllocationData* data,
+                                 RegisterKind kind, Zone* local_zone)
+    : RegisterAllocator(data, kind),
+      local_zone_(local_zone),
+      allocations_(local_zone),
+      queue_(local_zone) {}
+
+
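+// Sums the lengths of the range's use intervals, i.e. the number of lifetime
+// positions it covers. This size is the priority key used when enqueueing.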
+unsigned GreedyAllocator::GetLiveRangeSize(LiveRange* range) {
+  auto interval = range->first_interval();
+  if (interval == nullptr) return 0;
+
+  unsigned size = 0;
+  while (interval != nullptr) {
+    size += (interval->end().value() - interval->start().value());
+    interval = interval->next();
+  }
+
+  return size;
+}
+
+
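+// Adds |range| to the coalesced set of |reg_id| and records the assignment
+// on the range itself (and on its phi map entry, for phi ranges).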
+void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
+  allocations_[reg_id]->Insert(range);
+  if (range->HasRegisterAssigned()) {
+    DCHECK_EQ(reg_id, range->assigned_register());
+    return;
+  }
+  range->set_assigned_register(reg_id);
+  range->SetUseHints(reg_id);
+  if (range->is_phi()) {
+    data()->GetPhiMapValueFor(range->id())->set_assigned_register(reg_id);
+  }
+}
+
+
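+// Spill weight heuristic: use density (use count over covered size), tripled
+// when the range's first hint is a register. Fixed ranges and ranges without
+// a progressing split position get the maximum weight, so they are never
+// picked for eviction.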
+float GreedyAllocator::CalculateSpillWeight(LiveRange* range) {
+  InstructionOperand* first_hint = nullptr;
+  if (range->FirstHintPosition() != nullptr) {
+    first_hint = range->FirstHintPosition()->operand();
+  }
+
+  if (range->IsFixed()) return std::numeric_limits<float>::max();
+  bool spill;
+  if (!FindProgressingSplitPosition(range, &spill).IsValid())
+    return std::numeric_limits<float>::max();
+
+  float multiplier = 1.0;
+  if (first_hint != nullptr && first_hint->IsRegister()) {
+    multiplier = 3.0;
+  }
+
+  unsigned use_count = 0;
+  auto* pos = range->first_pos();
+  while (pos != nullptr) {
+    use_count++;
+    pos = pos->next();
+  }
+
+  unsigned range_size = GetLiveRangeSize(range);
+  DCHECK_NE(0U, range_size);
+
+  return multiplier * static_cast<float>(use_count) /
+         static_cast<float>(range_size);
+}
+
+
+float GreedyAllocator::CalculateMaxSpillWeight(
+    const ZoneSet<LiveRange*>& ranges) {
+  float max = 0.0;
+  for (auto* r : ranges) {
+    max = std::max(max, CalculateSpillWeight(r));
+  }
+  return max;
+}
+
+
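+// Undoes AssignRangeToRegister: removes |range| from its register's
+// coalesced set and clears its assignment and use hints.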
+void GreedyAllocator::Evict(LiveRange* range) {
+  bool removed = allocations_[range->assigned_register()]->Remove(range);
+  CHECK(removed);
+  range->UnsetUseHints();
+  range->UnsetAssignedRegister();
+  if (range->is_phi()) {
+    data()->GetPhiMapValueFor(range->id())->UnsetAssignedRegister();
+  }
+}
+
+
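+// Tries to assign |reg_id| to |range|. On failure, fills |conflicting| with
+// the ranges already occupying the register that overlap |range|.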
+bool GreedyAllocator::TryAllocatePhysicalRegister(
+    unsigned reg_id, LiveRange* range, ZoneSet<LiveRange*>* conflicting) {
+  auto* segment = range->first_interval();
+
+  auto* alloc_info = allocations_[reg_id];
+  while (segment != nullptr) {
+    if (auto* existing = alloc_info->Find(segment)) {
+      DCHECK(existing->HasRegisterAssigned());
+      conflicting->insert(existing);
+    }
+    segment = segment->next();
+  }
+  if (!conflicting->empty()) return false;
+  // No conflicts means we can safely allocate this register to this range.
+  AssignRangeToRegister(reg_id, range);
+  return true;
+}
+
+
+int GreedyAllocator::GetHintedRegister(LiveRange* range) {
+  int reg;
+  if (range->FirstHintPosition(&reg) != nullptr) {
+    return reg;
+  }
+  return -1;
+}
+
+
+bool GreedyAllocator::TryAllocate(LiveRange* current,
+                                  ZoneSet<LiveRange*>* conflicting) {
+  if (current->IsFixed()) {
+    return TryAllocatePhysicalRegister(current->assigned_register(), current,
+                                       conflicting);
+  }
+
+  int hinted_reg_id = GetHintedRegister(current);
+  if (hinted_reg_id >= 0) {
+    if (TryAllocatePhysicalRegister(hinted_reg_id, current, conflicting)) {
+      return true;
+    }
+  }
+
+  ZoneSet<LiveRange*> local_conflicts(local_zone());
+  for (unsigned candidate_reg = 0; candidate_reg < allocations_.size();
+       candidate_reg++) {
+    if (hinted_reg_id >= 0 &&
+        candidate_reg == static_cast<size_t>(hinted_reg_id))
+      continue;
+    local_conflicts.clear();
+    if (TryAllocatePhysicalRegister(candidate_reg, current, &local_conflicts)) {
+      conflicting->clear();
+      return true;
+    } else {
+      conflicting->insert(local_conflicts.begin(), local_conflicts.end());
+    }
+  }
+  return false;
+}
+
+
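+// Greedy counterpart of LinearScanAllocator::SpillBetweenUntil; the returned
+// tail live range is meant to be re-enqueued rather than placed on linear
+// scan's unhandled list.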
+LiveRange* GreedyAllocator::SpillBetweenUntil(LiveRange* range,
+                                              LifetimePosition start,
+                                              LifetimePosition until,
+                                              LifetimePosition end) {
+  CHECK(start < end);
+  auto second_part = SplitRangeAt(range, start);
+
+  if (second_part->Start() < end) {
+    // The split result intersects with [start, end[.
+    // Split it at a position in ]start+1, end[, spill the middle part, and
+    // return the rest for re-queueing.
+    auto third_part_end = end.PrevStart().End();
+    if (data()->IsBlockBoundary(end.Start())) {
+      third_part_end = end.Start();
+    }
+    auto third_part = SplitBetween(
+        second_part, Max(second_part->Start().End(), until), third_part_end);
+
+    DCHECK(third_part != second_part);
+
+    Spill(second_part);
+    return third_part;
+  } else {
+    // The split result does not intersect with [start, end[.
+    // Nothing to spill. Just return it for re-processing.
+    return second_part;
+  }
+}
+
+
+void GreedyAllocator::Enqueue(LiveRange* range) {
+  if (range == nullptr || range->IsEmpty()) return;
+  unsigned size = GetLiveRangeSize(range);
+  TRACE("Enqueuing range %d\n", range->id());
+  queue_.push(std::make_pair(size, range));
+}
+
+
+bool GreedyAllocator::HandleSpillOperands(LiveRange* range) {
+  auto position = range->Start();
+  TRACE("Processing interval %d start=%d\n", range->id(), position.value());
+
+  if (!range->HasNoSpillType()) {
+    TRACE("Live range %d already has a spill operand\n", range->id());
+    auto next_pos = position;
+    if (next_pos.IsGapPosition()) {
+      next_pos = next_pos.NextStart();
+    }
+    auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
+    // If the range already has a spill operand and it doesn't need a
+    // register immediately, split it and spill the first part of the range.
+    if (pos == nullptr) {
+      Spill(range);
+      return true;
+    } else if (pos->pos() > range->Start().NextStart()) {
+      // Do not spill the live range eagerly if the first use position that
+      // can benefit from a register is too close to the start of the range.
+      auto* remainder =
+          SpillBetweenUntil(range, position, position, pos->pos());
+      Enqueue(remainder);
+      return true;
+    }
+  }
+  return false;
+}
+
+
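+// Main allocation loop: enqueue all live ranges of this register kind, seed
+// the per-register sets with the fixed ranges, then repeatedly take the
+// largest remaining range and either allocate it, evict cheaper conflicting
+// ranges, or split/spill it when it stays blocked.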
+void GreedyAllocator::AllocateRegisters() {
+  for (auto range : data()->live_ranges()) {
+    if (range == nullptr) continue;
+    if (range->kind() == mode()) {
+      DCHECK(!range->HasRegisterAssigned() && !range->spilled());
+      TRACE("Enqueueing live range %d to priority queue \n", range->id());
+      Enqueue(range);
+    }
+  }
+
+  allocations_.resize(num_registers());
+  for (int i = 0; i < num_registers(); i++) {
+    allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone());
+  }
+
+  for (auto* current : GetFixedRegisters()) {
+    if (current != nullptr) {
+      DCHECK_EQ(mode(), current->kind());
+      int reg_nr = current->assigned_register();
+      bool inserted = allocations_[reg_nr]->Insert(current);
+      CHECK(inserted);
+    }
+  }
+
+  while (!queue_.empty()) {
+    auto current_pair = queue_.top();
+    queue_.pop();
+    auto current = current_pair.second;
+    if (HandleSpillOperands(current)) {
+      continue;
+    }
+    bool spill = false;
+    ZoneSet<LiveRange*> conflicting(local_zone());
+    if (!TryAllocate(current, &conflicting)) {
+      DCHECK(!conflicting.empty());
+      float this_weight = std::numeric_limits<float>::max();
+      LifetimePosition split_pos =
+          FindProgressingSplitPosition(current, &spill);
+      if (split_pos.IsValid()) {
+        this_weight = CalculateSpillWeight(current);
+      }
+
+      bool evicted = false;
+      for (auto* conflict : conflicting) {
+        if (CalculateSpillWeight(conflict) < this_weight) {
+          Evict(conflict);
+          Enqueue(conflict);
+          evicted = true;
+        }
+      }
+      if (evicted) {
+        conflicting.clear();
+        TryAllocate(current, &conflicting);
+      }
+      if (!conflicting.empty()) {
+        DCHECK(!current->IsFixed() || current->CanBeSpilled(current->Start()));
+        DCHECK(split_pos.IsValid());
+        AllocateBlockedRange(current, split_pos, spill);
+      }
+    }
+  }
+
+  for (size_t i = 0; i < allocations_.size(); ++i) {
+    if (!allocations_[i]->IsEmpty()) {
+      data()->MarkAllocated(mode(), static_cast<int>(i));
+    }
+  }
+}
+
+
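+// Computes a valid split position at or before |pos|: block boundaries are
+// split at the boundary itself, other positions just before |pos|, at the
+// previous instruction's end position.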
+LifetimePosition GreedyAllocator::GetSplittablePos(LifetimePosition pos) {
+  auto ret = pos.PrevStart().End();
+  if (data()->IsBlockBoundary(pos.Start())) {
+    ret = pos.Start();
+  }
+  DCHECK(ret <= pos);
+  return ret;
+}
+
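+// Finds a split position that guarantees progress. If the range has no use
+// that requires a register, returns an optimal spilling position and sets
+// |*is_spill_pos|; otherwise searches for a position strictly inside
+// ]start, end[, so that both resulting pieces are smaller than the original.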
+LifetimePosition GreedyAllocator::FindProgressingSplitPosition(
+    LiveRange* range, bool* is_spill_pos) {
+  auto start = range->Start();
+  auto end = range->End();
+
+  UsePosition* next_reg_use = range->first_pos();
+  while (next_reg_use != nullptr &&
+         next_reg_use->type() != UsePositionType::kRequiresRegister) {
+    next_reg_use = next_reg_use->next();
+  }
+
+  if (next_reg_use == nullptr) {
+    *is_spill_pos = true;
+    auto ret = FindOptimalSpillingPos(range, start);
+    DCHECK(ret.IsValid());
+    return ret;
+  }
+
+  *is_spill_pos = false;
+  auto reg_pos = next_reg_use->pos();
+  auto correct_pos = GetSplittablePos(reg_pos);
+  if (start < correct_pos && correct_pos < end) {
+    return correct_pos;
+  }
+
+  if (correct_pos >= end) {
+    return LifetimePosition::Invalid();
+  }
+
+  // correct_pos must be at or before start; find the next register use.
+  next_reg_use = next_reg_use->next();
+  auto reference = reg_pos;
+  while (next_reg_use != nullptr) {
+    auto pos = next_reg_use->pos();
+    // Skip over tight successive uses.
+    if (reference.NextStart() < pos) {
+      break;
+    }
+    reference = pos;
+    next_reg_use = next_reg_use->next();
+  }
+
+  if (next_reg_use == nullptr) {
+    // While there may not be another use, we may still have space in the range
+    // to clip off.
+    correct_pos = reference.NextStart();
+    if (start < correct_pos && correct_pos < end) {
+      return correct_pos;
+    }
+    return LifetimePosition::Invalid();
+  }
+
+  correct_pos = GetSplittablePos(next_reg_use->pos());
+  if (start < correct_pos && correct_pos < end) {
+    DCHECK(reference < correct_pos);
+    return correct_pos;
+  }
+  return LifetimePosition::Invalid();
+}
+
+
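+// Handles a range that could not be allocated: split it at |pos|, spill or
+// re-enqueue the tail (per |spill|), and re-enqueue the head if the split
+// actually produced a shorter range.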
+void GreedyAllocator::AllocateBlockedRange(LiveRange* current,
+                                           LifetimePosition pos, bool spill) {
+  auto tail = SplitRangeAt(current, pos);
+  if (spill) {
+    Spill(tail);
+  } else {
+    Enqueue(tail);
+  }
+  if (tail != current) {
+    Enqueue(current);
+  }
+}
+
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/greedy-allocator.h b/src/compiler/greedy-allocator.h
new file mode 100644
index 0000000..5bc7ce3
--- /dev/null
+++ b/src/compiler/greedy-allocator.h
@@ -0,0 +1,67 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_GREEDY_ALLOCATOR_H_
+#define V8_GREEDY_ALLOCATOR_H_
+
+#include "src/compiler/register-allocator.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class CoalescedLiveRanges;
+
+
+// A variant of the LLVM Greedy Register Allocator. See
+// http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
+class GreedyAllocator final : public RegisterAllocator {
+ public:
+  explicit GreedyAllocator(RegisterAllocationData* data, RegisterKind kind,
+                           Zone* local_zone);
+
+  void AllocateRegisters();
+
+ private:
+  LifetimePosition GetSplittablePos(LifetimePosition pos);
+  const RegisterConfiguration* config() const { return data()->config(); }
+  Zone* local_zone() const { return local_zone_; }
+
+  int GetHintedRegister(LiveRange* range);
+
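+  // Max-heap of (size, range) pairs: larger live ranges are dequeued first.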
+  typedef ZonePriorityQueue<std::pair<unsigned, LiveRange*>> PQueue;
+
+  unsigned GetLiveRangeSize(LiveRange* range);
+  void Enqueue(LiveRange* range);
+
+  void Evict(LiveRange* range);
+  float CalculateSpillWeight(LiveRange* range);
+  float CalculateMaxSpillWeight(const ZoneSet<LiveRange*>& ranges);
+
+
+  bool TryAllocate(LiveRange* current, ZoneSet<LiveRange*>* conflicting);
+  bool TryAllocatePhysicalRegister(unsigned reg_id, LiveRange* range,
+                                   ZoneSet<LiveRange*>* conflicting);
+  bool HandleSpillOperands(LiveRange* range);
+  void AllocateBlockedRange(LiveRange* current, LifetimePosition pos,
+                            bool spill);
+
+  LiveRange* SpillBetweenUntil(LiveRange* range, LifetimePosition start,
+                               LifetimePosition until, LifetimePosition end);
+  void AssignRangeToRegister(int reg_id, LiveRange* range);
+
+  LifetimePosition FindProgressingSplitPosition(LiveRange* range,
+                                                bool* is_spill_pos);
+
+  Zone* local_zone_;
+  ZoneVector<CoalescedLiveRanges*> allocations_;
+  PQueue queue_;
+  DISALLOW_COPY_AND_ASSIGN(GreedyAllocator);
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+#endif  // V8_GREEDY_ALLOCATOR_H_
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index e7c76aabc3934d7212c714197bd75ba34470b87a..1dee5ae0c7f7fc80d2a4874b03ce4e0b1d2be2f2 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -20,6 +20,7 @@
 #include "src/compiler/frame-elider.h"
 #include "src/compiler/graph-replay.h"
 #include "src/compiler/graph-visualizer.h"
+#include "src/compiler/greedy-allocator.h"
 #include "src/compiler/instruction.h"
 #include "src/compiler/instruction-selector.h"
 #include "src/compiler/js-builtin-reducer.h"
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 08b71f2e40488181657a064418bff797d9186b9c..0a6f16072d433fd7f73c977499e3464a6f92a3fd 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -32,13 +32,6 @@ int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) {
 }
 
 
-const ZoneVector<LiveRange*>& GetFixedRegisters(
-    const RegisterAllocationData* data, RegisterKind kind) {
-  return kind == DOUBLE_REGISTERS ? data->fixed_double_live_ranges()
-                                  : data->fixed_live_ranges();
-}
-
-
 const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence,
                                           const InstructionBlock* block) {
   auto index = block->loop_header();
@@ -53,13 +46,6 @@ const InstructionBlock* GetInstructionBlock(const InstructionSequence* code,
 }
 
 
-bool IsBlockBoundary(const InstructionSequence* code, LifetimePosition pos) {
-  return pos.IsFullStart() &&
-         code->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
-             pos.ToInstructionIndex();
-}
-
-
 Instruction* GetLastInstruction(InstructionSequence* code,
                                 const InstructionBlock* block) {
   return code->InstructionAt(block->last_instruction_index());
@@ -1071,6 +1057,13 @@ void RegisterAllocationData::MarkAllocated(RegisterKind kind, int index) {
 }
 
 
+bool RegisterAllocationData::IsBlockBoundary(LifetimePosition pos) const {
+  return pos.IsFullStart() &&
+         code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
+             pos.ToInstructionIndex();
+}
+
+
 ConstraintBuilder::ConstraintBuilder(RegisterAllocationData* data)
     : data_(data) {}
 
@@ -1853,6 +1846,12 @@ void RegisterAllocator::Spill(LiveRange* range) {
 }
 
 
+const ZoneVector<LiveRange*>& RegisterAllocator::GetFixedRegisters() const {
+  return mode() == DOUBLE_REGISTERS ? data()->fixed_double_live_ranges()
+                                    : data()->fixed_live_ranges();
+}
+
+
 LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
                                          RegisterKind kind, Zone* local_zone)
     : RegisterAllocator(data, kind),
@@ -1884,7 +1883,7 @@ void LinearScanAllocator::AllocateRegisters() {
   SortUnhandled();
   DCHECK(UnhandledIsSorted());
 
-  auto& fixed_ranges = GetFixedRegisters(data(), mode());
+  auto& fixed_ranges = GetFixedRegisters();
   for (auto current : fixed_ranges) {
     if (current != nullptr) {
       DCHECK_EQ(mode(), current->kind());
@@ -2380,7 +2379,7 @@ void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
     // Split it at position between ]start+1, end[, spill the middle part
     // and put the rest to unhandled.
     auto third_part_end = end.PrevStart().End();
-    if (IsBlockBoundary(code(), end.Start())) {
+    if (data()->IsBlockBoundary(end.Start())) {
       third_part_end = end.Start();
     }
     auto third_part = SplitBetween(
@@ -2398,446 +2397,6 @@ void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
 }
 
 
-class CoalescedLiveRanges : public ZoneObject {
- public:
-  explicit CoalescedLiveRanges(Zone* zone) : storage_(zone) {}
-
-  LiveRange* Find(UseInterval* query) {
-    ZoneSplayTree<Config>::Locator locator;
-
-    if (storage_.Find(GetKey(query), &locator)) {
-      return locator.value();
-    }
-    return nullptr;
-  }
-
-  // TODO(mtrofin): Change to void returning if we do not care if the interval
-  // was previously added.
-  bool Insert(LiveRange* range) {
-    auto* interval = range->first_interval();
-    while (interval != nullptr) {
-      if (!Insert(interval, range)) return false;
-      interval = interval->next();
-    }
-    return true;
-  }
-
-  bool Remove(LiveRange* range) {
-    bool ret = false;
-    auto* segment = range->first_interval();
-    while (segment != nullptr) {
-      ret |= Remove(segment);
-      segment = segment->next();
-    }
-    return ret;
-  }
-
-  bool IsEmpty() { return storage_.is_empty(); }
-
- private:
-  struct Config {
-    typedef std::pair<int, int> Key;
-    typedef LiveRange* Value;
-    static const Key kNoKey;
-    static Value NoValue() { return nullptr; }
-    static int Compare(const Key& a, const Key& b) {
-      if (a.second <= b.first) return -1;
-      if (a.first >= b.second) return 1;
-      return 0;
-    }
-  };
-
-  Config::Key GetKey(UseInterval* interval) {
-    if (interval == nullptr) return std::make_pair(0, 0);
-    return std::make_pair(interval->start().value(), interval->end().value());
-  }
-
-  // TODO(mtrofin): Change to void returning if we do not care if the interval
-  // was previously added.
-  bool Insert(UseInterval* interval, LiveRange* range) {
-    ZoneSplayTree<Config>::Locator locator;
-    bool ret = storage_.Insert(GetKey(interval), &locator);
-    if (ret) locator.set_value(range);
-    return ret;
-  }
-
-  bool Remove(UseInterval* key) { return storage_.Remove(GetKey(key)); }
-
-  ZoneSplayTree<Config> storage_;
-  DISALLOW_COPY_AND_ASSIGN(CoalescedLiveRanges);
-};
-
-
-const std::pair<int, int> CoalescedLiveRanges::Config::kNoKey = {0, 0};
-
-GreedyAllocator::GreedyAllocator(RegisterAllocationData* data,
-                                 RegisterKind kind, Zone* local_zone)
-    : RegisterAllocator(data, kind),
-      local_zone_(local_zone),
-      allocations_(local_zone),
-      queue_(local_zone) {}
-
-
-unsigned GreedyAllocator::GetLiveRangeSize(LiveRange* range) {
-  auto interval = range->first_interval();
-  if (interval == nullptr) return 0;
-
-  unsigned size = 0;
-  while (interval != nullptr) {
-    size += (interval->end().value() - interval->start().value());
-    interval = interval->next();
-  }
-
-  return size;
-}
-
-
-void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
-  allocations_[reg_id]->Insert(range);
-  if (range->HasRegisterAssigned()) {
-    DCHECK_EQ(reg_id, range->assigned_register());
-    return;
-  }
-  range->set_assigned_register(reg_id);
-  range->SetUseHints(reg_id);
-  if (range->is_phi()) {
-    data()->GetPhiMapValueFor(range->id())->set_assigned_register(reg_id);
-  }
-}
-
-
-float GreedyAllocator::CalculateSpillWeight(LiveRange* range) {
-  InstructionOperand* first_hint = nullptr;
-  if (range->FirstHintPosition() != nullptr) {
-    first_hint = range->FirstHintPosition()->operand();
-  }
-
-  if (range->IsFixed()) return std::numeric_limits<float>::max();
-  bool spill;
-  if (!FindProgressingSplitPosition(range, &spill).IsValid())
-    return std::numeric_limits<float>::max();
-
-  float multiplier = 1.0;
-  if (first_hint != nullptr && first_hint->IsRegister()) {
-    multiplier = 3.0;
-  }
-
-  unsigned use_count = 0;
-  auto* pos = range->first_pos();
-  while (pos != nullptr) {
-    use_count++;
-    pos = pos->next();
-  }
-
-  unsigned range_size = GetLiveRangeSize(range);
-  DCHECK_NE(0U, range_size);
-
-  return multiplier * static_cast<float>(use_count) /
-         static_cast<float>(range_size);
-}
-
-
-float GreedyAllocator::CalculateMaxSpillWeight(
-    const ZoneSet<LiveRange*>& ranges) {
-  float max = 0.0;
-  for (auto* r : ranges) {
-    max = std::max(max, CalculateSpillWeight(r));
-  }
-  return max;
-}
-
-
-void GreedyAllocator::Evict(LiveRange* range) {
-  bool removed = allocations_[range->assigned_register()]->Remove(range);
-  CHECK(removed);
-  range->UnsetUseHints();
-  range->UnsetAssignedRegister();
-  if (range->is_phi()) {
-    data()->GetPhiMapValueFor(range->id())->UnsetAssignedRegister();
-  }
-}
-
-
-bool GreedyAllocator::TryAllocatePhysicalRegister(
-    unsigned reg_id, LiveRange* range, ZoneSet<LiveRange*>* conflicting) {
-  auto* segment = range->first_interval();
-
-  auto* alloc_info = allocations_[reg_id];
-  while (segment != nullptr) {
-    if (auto* existing = alloc_info->Find(segment)) {
-      DCHECK(existing->HasRegisterAssigned());
-      conflicting->insert(existing);
-    }
-    segment = segment->next();
-  }
-  if (!conflicting->empty()) return false;
-  // No conflicts means we can safely allocate this register to this range.
-  AssignRangeToRegister(reg_id, range);
-  return true;
-}
-
-
-int GreedyAllocator::GetHintedRegister(LiveRange* range) {
-  int reg;
-  if (range->FirstHintPosition(&reg) != nullptr) {
-    return reg;
-  }
-  return -1;
-}
-
-
-bool GreedyAllocator::TryAllocate(LiveRange* current,
-                                  ZoneSet<LiveRange*>* conflicting) {
-  if (current->IsFixed()) {
-    return TryAllocatePhysicalRegister(current->assigned_register(), current,
-                                       conflicting);
-  }
-
-  int hinted_reg_id = GetHintedRegister(current);
-  if (hinted_reg_id >= 0) {
-    if (TryAllocatePhysicalRegister(hinted_reg_id, current, conflicting)) {
-      return true;
-    }
-  }
-
-  ZoneSet<LiveRange*> local_conflicts(local_zone());
-  for (unsigned candidate_reg = 0; candidate_reg < allocations_.size();
-       candidate_reg++) {
-    if (hinted_reg_id >= 0 &&
-        candidate_reg == static_cast<size_t>(hinted_reg_id))
-      continue;
-    local_conflicts.clear();
-    if (TryAllocatePhysicalRegister(candidate_reg, current, &local_conflicts)) {
-      conflicting->clear();
-      return true;
-    } else {
-      conflicting->insert(local_conflicts.begin(), local_conflicts.end());
-    }
-  }
-  return false;
-}
-
-
-LiveRange* GreedyAllocator::SpillBetweenUntil(LiveRange* range,
-                                              LifetimePosition start,
-                                              LifetimePosition until,
-                                              LifetimePosition end) {
-  CHECK(start < end);
-  auto second_part = SplitRangeAt(range, start);
-
-  if (second_part->Start() < end) {
-    // The split result intersects with [start, end[.
-    // Split it at position between ]start+1, end[, spill the middle part
-    // and put the rest to unhandled.
-    auto third_part_end = end.PrevStart().End();
-    if (IsBlockBoundary(code(), end.Start())) {
-      third_part_end = end.Start();
-    }
-    auto third_part = SplitBetween(
-        second_part, Max(second_part->Start().End(), until), third_part_end);
-
-    DCHECK(third_part != second_part);
-
-    Spill(second_part);
-    return third_part;
-  } else {
-    // The split result does not intersect with [start, end[.
-    // Nothing to spill. Just return it for re-processing.
-    return second_part;
-  }
-}
-
-
-void GreedyAllocator::Enqueue(LiveRange* range) {
-  if (range == nullptr || range->IsEmpty()) return;
-  unsigned size = GetLiveRangeSize(range);
-  TRACE("Enqueuing range %d\n", range->id());
-  queue_.push(std::make_pair(size, range));
-}
-
-
-bool GreedyAllocator::HandleSpillOperands(LiveRange* range) {
-  auto position = range->Start();
-  TRACE("Processing interval %d start=%d\n", range->id(), position.value());
-
-  if (!range->HasNoSpillType()) {
-    TRACE("Live range %d already has a spill operand\n", range->id());
-    auto next_pos = position;
-    if (next_pos.IsGapPosition()) {
-      next_pos = next_pos.NextStart();
-    }
-    auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
-    // If the range already has a spill operand and it doesn't need a
-    // register immediately, split it and spill the first part of the range.
-    if (pos == nullptr) {
-      Spill(range);
-      return true;
-    } else if (pos->pos() > range->Start().NextStart()) {
-      // Do not spill live range eagerly if use position that can benefit from
-      // the register is too close to the start of live range.
-      auto* reminder = SpillBetweenUntil(range, position, position, pos->pos());
-      Enqueue(reminder);
-      return true;
-    }
-  }
-  return TryReuseSpillForPhi(range);
-}
-
-
-void GreedyAllocator::AllocateRegisters() {
-  for (auto range : data()->live_ranges()) {
-    if (range == nullptr) continue;
-    if (range->kind() == mode()) {
-      DCHECK(!range->HasRegisterAssigned() && !range->spilled());
-      TRACE("Enqueueing live range %d to priority queue \n", range->id());
-      Enqueue(range);
-    }
-  }
-
-  allocations_.resize(num_registers());
-  for (int i = 0; i < num_registers(); i++) {
-    allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone());
-  }
-
-  for (auto* current : GetFixedRegisters(data(), mode())) {
-    if (current != nullptr) {
-      DCHECK_EQ(mode(), current->kind());
-      int reg_nr = current->assigned_register();
-      bool inserted = allocations_[reg_nr]->Insert(current);
-      CHECK(inserted);
-    }
-  }
-
-  while (!queue_.empty()) {
-    auto current_pair = queue_.top();
-    queue_.pop();
-    auto current = current_pair.second;
-    if (HandleSpillOperands(current)) {
-      continue;
-    }
-    bool spill = false;
-    ZoneSet<LiveRange*> conflicting(local_zone());
-    if (!TryAllocate(current, &conflicting)) {
-      DCHECK(!conflicting.empty());
-      float this_weight = std::numeric_limits<float>::max();
-      LifetimePosition split_pos =
-          FindProgressingSplitPosition(current, &spill);
-      if (split_pos.IsValid()) {
-        this_weight = CalculateSpillWeight(current);
-      }
-
-      bool evicted = false;
-      for (auto* conflict : conflicting) {
-        if (CalculateSpillWeight(conflict) < this_weight) {
-          Evict(conflict);
-          Enqueue(conflict);
-          evicted = true;
-        }
-      }
-      if (evicted) {
-        conflicting.clear();
-        TryAllocate(current, &conflicting);
-      }
-      if (!conflicting.empty()) {
-        DCHECK(!current->IsFixed() || current->CanBeSpilled(current->Start()));
-        DCHECK(split_pos.IsValid());
-        AllocateBlockedRange(current, split_pos, spill);
-      }
-    }
-  }
-
-  for (size_t i = 0; i < allocations_.size(); ++i) {
-    if (!allocations_[i]->IsEmpty()) {
-      data()->MarkAllocated(mode(), static_cast<int>(i));
-    }
-  }
-}
-
-
-LifetimePosition GreedyAllocator::GetSplittablePos(LifetimePosition pos) {
-  auto ret = pos.PrevStart().End();
-  if (IsBlockBoundary(code(), pos.Start())) {
-    ret = pos.Start();
-  }
-  DCHECK(ret <= pos);
-  return ret;
-}
-
-LifetimePosition GreedyAllocator::FindProgressingSplitPosition(
-    LiveRange* range, bool* is_spill_pos) {
-  auto start = range->Start();
-  auto end = range->End();
-
-  UsePosition* next_reg_use = range->first_pos();
-  while (next_reg_use != nullptr &&
-         next_reg_use->type() != UsePositionType::kRequiresRegister) {
-    next_reg_use = next_reg_use->next();
-  }
-
-  if (next_reg_use == nullptr) {
-    *is_spill_pos = true;
-    auto ret = FindOptimalSpillingPos(range, start);
-    DCHECK(ret.IsValid());
-    return ret;
-  }
-
-  *is_spill_pos = false;
-  auto reg_pos = next_reg_use->pos();
-  auto correct_pos = GetSplittablePos(reg_pos);
-  if (start < correct_pos && correct_pos < end) {
-    return correct_pos;
-  }
-
-  if (correct_pos >= end) {
-    return LifetimePosition::Invalid();
-  }
-
-  // Correct_pos must be at or before start. Find the next use position.
-  next_reg_use = next_reg_use->next();
-  auto reference = reg_pos;
-  while (next_reg_use != nullptr) {
-    auto pos = next_reg_use->pos();
-    // Skip over tight successive uses.
-    if (reference.NextStart() < pos) {
-      break;
-    }
-    reference = pos;
-    next_reg_use = next_reg_use->next();
-  }
-
-  if (next_reg_use == nullptr) {
-    // While there may not be another use, we may still have space in the range
-    // to clip off.
-    correct_pos = reference.NextStart();
-    if (start < correct_pos && correct_pos < end) {
-      return correct_pos;
-    }
-    return LifetimePosition::Invalid();
-  }
-
-  correct_pos = GetSplittablePos(next_reg_use->pos());
-  if (start < correct_pos && correct_pos < end) {
-    DCHECK(reference < correct_pos);
-    return correct_pos;
-  }
-  return LifetimePosition::Invalid();
-}
-
-
-void GreedyAllocator::AllocateBlockedRange(LiveRange* current,
-                                           LifetimePosition pos, bool spill) {
-  auto tail = SplitRangeAt(current, pos);
-  if (spill) {
-    Spill(tail);
-  } else {
-    Enqueue(tail);
-  }
-  if (tail != current) {
-    Enqueue(current);
-  }
-}
-
-
 SpillSlotLocator::SpillSlotLocator(RegisterAllocationData* data)
     : data_(data) {}
 
@@ -2857,91 +2416,6 @@ void SpillSlotLocator::LocateSpillSlots() {
 }
 
 
-bool GreedyAllocator::TryReuseSpillForPhi(LiveRange* range) {
-  if (range->IsChild() || !range->is_phi()) return false;
-  DCHECK(!range->HasSpillOperand());
-
-  auto phi_map_value = data()->GetPhiMapValueFor(range->id());
-  auto phi = phi_map_value->phi();
-  auto block = phi_map_value->block();
-  // Count the number of spilled operands.
-  size_t spilled_count = 0;
-  LiveRange* first_op = nullptr;
-  for (size_t i = 0; i < phi->operands().size(); i++) {
-    int op = phi->operands()[i];
-    LiveRange* op_range = LiveRangeFor(op);
-    if (!op_range->HasSpillRange()) continue;
-    auto pred = code()->InstructionBlockAt(block->predecessors()[i]);
-    auto pred_end = LifetimePosition::InstructionFromInstructionIndex(
-        pred->last_instruction_index());
-    while (op_range != nullptr && !op_range->CanCover(pred_end)) {
-      op_range = op_range->next();
-    }
-    if (op_range != nullptr && op_range->spilled()) {
-      spilled_count++;
-      if (first_op == nullptr) {
-        first_op = op_range->TopLevel();
-      }
-    }
-  }
-
-  // Only continue if more than half of the operands are spilled.
-  if (spilled_count * 2 <= phi->operands().size()) {
-    return false;
-  }
-
-  // Try to merge the spilled operands and count the number of merged spilled
-  // operands.
-  DCHECK(first_op != nullptr);
-  auto first_op_spill = first_op->GetSpillRange();
-  size_t num_merged = 1;
-  for (size_t i = 1; i < phi->operands().size(); i++) {
-    int op = phi->operands()[i];
-    auto op_range = LiveRangeFor(op);
-    if (!op_range->HasSpillRange()) continue;
-    auto op_spill = op_range->GetSpillRange();
-    if (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill)) {
-      num_merged++;
-    }
-  }
-
-  // Only continue if enough operands could be merged to the
-  // same spill slot.
-  if (num_merged * 2 <= phi->operands().size() ||
-      AreUseIntervalsIntersecting(first_op_spill->interval(),
-                                  range->first_interval())) {
-    return false;
-  }
-
-  // If the range does not need register soon, spill it to the merged
-  // spill range.
-  auto next_pos = range->Start();
-  if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
-  auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
-  if (pos == nullptr) {
-    auto spill_range =
-        range->TopLevel()->HasSpillRange()
-            ? range->TopLevel()->GetSpillRange()
-            : data()->AssignSpillRangeToLiveRange(range->TopLevel());
-    bool merged = first_op_spill->TryMerge(spill_range);
-    CHECK(merged);
-    Spill(range);
-    return true;
-  } else if (pos->pos() > range->Start().NextStart()) {
-    auto spill_range =
-        range->TopLevel()->HasSpillRange()
-            ? range->TopLevel()->GetSpillRange()
-            : data()->AssignSpillRangeToLiveRange(range->TopLevel());
-    bool merged = first_op_spill->TryMerge(spill_range);
-    CHECK(merged);
-    Enqueue(
-        SpillBetweenUntil(range, range->Start(), range->Start(), pos->pos()));
-    return true;
-  }
-  return false;
-}
-
-
 OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {}
 
 
@@ -3329,7 +2803,7 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
       // boundary.
       if (second_range->spilled()) continue;
       if (first_range->End() != pos) continue;
-      if (IsBlockBoundary(code(), pos) &&
+      if (data()->IsBlockBoundary(pos) &&
           !CanEagerlyResolveControlFlow(GetInstructionBlock(code(), pos))) {
         continue;
       }
diff --git a/src/compiler/register-allocator.h b/src/compiler/register-allocator.h
index baffedd919292c93e68b263793f3424c30aa0aaa..5a09c012d8234f42fb24db895130328b9be35b81 100644
--- a/src/compiler/register-allocator.h
+++ b/src/compiler/register-allocator.h
@@ -607,6 +607,7 @@ class RegisterAllocationData final : public ZoneObject {
   PhiMapValue* InitializePhiMap(const InstructionBlock* block,
                                 PhiInstruction* phi);
   PhiMapValue* GetPhiMapValueFor(int virtual_register);
+  bool IsBlockBoundary(LifetimePosition pos) const;
 
  private:
   Zone* const allocation_zone_;
@@ -768,6 +769,8 @@ class RegisterAllocator : public ZoneObject {
   LifetimePosition FindOptimalSpillingPos(LiveRange* range,
                                           LifetimePosition pos);
 
+  const ZoneVector<LiveRange*>& GetFixedRegisters() const;
+
  private:
   RegisterAllocationData* const data_;
   const RegisterKind mode_;
@@ -840,55 +843,6 @@ class LinearScanAllocator final : public RegisterAllocator {
   DISALLOW_COPY_AND_ASSIGN(LinearScanAllocator);
 };
 
-class CoalescedLiveRanges;
-
-
-// A variant of the LLVM Greedy Register Allocator. See
-// http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
-class GreedyAllocator final : public RegisterAllocator {
- public:
-  explicit GreedyAllocator(RegisterAllocationData* data, RegisterKind kind,
-                           Zone* local_zone);
-
-  void AllocateRegisters();
-
- private:
-  LifetimePosition GetSplittablePos(LifetimePosition pos);
-  const RegisterConfiguration* config() const { return data()->config(); }
-  Zone* local_zone() const { return local_zone_; }
-  bool TryReuseSpillForPhi(LiveRange* range);
-  int GetHintedRegister(LiveRange* range);
-
-  typedef ZonePriorityQueue<std::pair<unsigned, LiveRange*>> PQueue;
-
-  unsigned GetLiveRangeSize(LiveRange* range);
-  void Enqueue(LiveRange* range);
-
-  void Evict(LiveRange* range);
-  float CalculateSpillWeight(LiveRange* range);
-  float CalculateMaxSpillWeight(const ZoneSet<LiveRange*>& ranges);
-
-
-  bool TryAllocate(LiveRange* current, ZoneSet<LiveRange*>* conflicting);
-  bool TryAllocatePhysicalRegister(unsigned reg_id, LiveRange* range,
-                                   ZoneSet<LiveRange*>* conflicting);
-  bool HandleSpillOperands(LiveRange* range);
-  void AllocateBlockedRange(LiveRange* current, LifetimePosition pos,
-                            bool spill);
-
-  LiveRange* SpillBetweenUntil(LiveRange* range, LifetimePosition start,
-                               LifetimePosition until, LifetimePosition end);
-  void AssignRangeToRegister(int reg_id, LiveRange* range);
-
-  LifetimePosition FindProgressingSplitPosition(LiveRange* range,
-                                                bool* is_spill_pos);
-
-  Zone* local_zone_;
-  ZoneVector<CoalescedLiveRanges*> allocations_;
-  PQueue queue_;
-  DISALLOW_COPY_AND_ASSIGN(GreedyAllocator);
-};
-
 
 class SpillSlotLocator final : public ZoneObject {
  public:
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 621de130c6fdc489186569555c525d57aa3d9eaa..51c1ae421bcc1e1ea7e3402cb9026b40b27f78d8 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
         '../../src/compiler/graph-visualizer.h',
         '../../src/compiler/graph.cc',
         '../../src/compiler/graph.h',
+        '../../src/compiler/greedy-allocator.cc',
+        '../../src/compiler/greedy-allocator.h',
         '../../src/compiler/instruction-codes.h',
         '../../src/compiler/instruction-selector-impl.h',
         '../../src/compiler/instruction-selector.cc',