[turbofan] add gap move verifier
authordcarney@chromium.org <dcarney@chromium.org>
Wed, 12 Nov 2014 14:53:30 +0000 (14:53 +0000)
committerdcarney@chromium.org <dcarney@chromium.org>
Wed, 12 Nov 2014 14:53:51 +0000 (14:53 +0000)
R=jarin@chromium.org

BUG=

Review URL: https://codereview.chromium.org/704193007

Cr-Commit-Position: refs/heads/master@{#25300}
git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@25300 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

src/compiler/instruction.cc
src/compiler/instruction.h
src/compiler/pipeline.cc
src/compiler/register-allocator-verifier.cc
src/compiler/register-allocator-verifier.h
src/compiler/register-allocator.cc
src/compiler/register-allocator.h
test/cctest/compiler/test-run-machops.cc
test/unittests/compiler/register-allocator-unittest.cc

index d575be4..a713471 100644 (file)
@@ -435,8 +435,8 @@ InstructionSequence::InstructionSequence(Zone* instruction_zone,
 
 
 BlockStartInstruction* InstructionSequence::GetBlockStart(
-    BasicBlock::RpoNumber rpo) {
-  InstructionBlock* block = InstructionBlockAt(rpo);
+    BasicBlock::RpoNumber rpo) const {
+  const InstructionBlock* block = InstructionBlockAt(rpo);
   return BlockStartInstruction::cast(InstructionAt(block->code_start()));
 }
 
index 8a6006a..0a5b5eb 100644 (file)
@@ -60,7 +60,7 @@ class InstructionOperand : public ZoneObject {
   INSTRUCTION_OPERAND_PREDICATE(Unallocated, UNALLOCATED, 0)
   INSTRUCTION_OPERAND_PREDICATE(Ignored, INVALID, 0)
 #undef INSTRUCTION_OPERAND_PREDICATE
-  bool Equals(InstructionOperand* other) const {
+  bool Equals(const InstructionOperand* other) const {
     return value_ == other->value_;
   }
 
@@ -120,6 +120,7 @@ class UnallocatedOperand : public InstructionOperand {
 
   explicit UnallocatedOperand(ExtendedPolicy policy)
       : InstructionOperand(UNALLOCATED, 0) {
+    value_ |= VirtualRegisterField::encode(kInvalidVirtualRegister);
     value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
     value_ |= ExtendedPolicyField::encode(policy);
     value_ |= LifetimeField::encode(USED_AT_END);
@@ -128,6 +129,7 @@ class UnallocatedOperand : public InstructionOperand {
   UnallocatedOperand(BasicPolicy policy, int index)
       : InstructionOperand(UNALLOCATED, 0) {
     DCHECK(policy == FIXED_SLOT);
+    value_ |= VirtualRegisterField::encode(kInvalidVirtualRegister);
     value_ |= BasicPolicyField::encode(policy);
     value_ |= index << FixedSlotIndexField::kShift;
     DCHECK(this->fixed_slot_index() == index);
@@ -136,6 +138,7 @@ class UnallocatedOperand : public InstructionOperand {
   UnallocatedOperand(ExtendedPolicy policy, int index)
       : InstructionOperand(UNALLOCATED, 0) {
     DCHECK(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER);
+    value_ |= VirtualRegisterField::encode(kInvalidVirtualRegister);
     value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
     value_ |= ExtendedPolicyField::encode(policy);
     value_ |= LifetimeField::encode(USED_AT_END);
@@ -144,6 +147,7 @@ class UnallocatedOperand : public InstructionOperand {
 
   UnallocatedOperand(ExtendedPolicy policy, Lifetime lifetime)
       : InstructionOperand(UNALLOCATED, 0) {
+    value_ |= VirtualRegisterField::encode(kInvalidVirtualRegister);
     value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
     value_ |= ExtendedPolicyField::encode(policy);
     value_ |= LifetimeField::encode(lifetime);
@@ -198,7 +202,8 @@ class UnallocatedOperand : public InstructionOperand {
   class LifetimeField : public BitField<Lifetime, 25, 1> {};
   class FixedRegisterField : public BitField<int, 26, 6> {};
 
-  static const int kMaxVirtualRegisters = VirtualRegisterField::kMax + 1;
+  static const int kInvalidVirtualRegister = VirtualRegisterField::kMax;
+  static const int kMaxVirtualRegisters = VirtualRegisterField::kMax;
   static const int kFixedSlotIndexWidth = FixedSlotIndexField::kSize;
   static const int kMaxFixedSlotIndex = (1 << (kFixedSlotIndexWidth - 1)) - 1;
   static const int kMinFixedSlotIndex = -(1 << (kFixedSlotIndexWidth - 1));
@@ -586,7 +591,11 @@ class GapInstruction : public Instruction {
     return parallel_moves_[pos];
   }
 
-  ParallelMove* GetParallelMove(InnerPosition pos) const {
+  ParallelMove* GetParallelMove(InnerPosition pos) {
+    return parallel_moves_[pos];
+  }
+
+  const ParallelMove* GetParallelMove(InnerPosition pos) const {
     return parallel_moves_[pos];
   }
 
@@ -634,6 +643,11 @@ class BlockStartInstruction FINAL : public GapInstruction {
     return static_cast<BlockStartInstruction*>(instr);
   }
 
+  static const BlockStartInstruction* cast(const Instruction* instr) {
+    DCHECK(instr->IsBlockStart());
+    return static_cast<const BlockStartInstruction*>(instr);
+  }
+
  private:
   BlockStartInstruction() : GapInstruction(kBlockStartInstruction) {}
 };
@@ -917,7 +931,7 @@ class InstructionSequence FINAL {
 
   void AddGapMove(int index, InstructionOperand* from, InstructionOperand* to);
 
-  BlockStartInstruction* GetBlockStart(BasicBlock::RpoNumber rpo);
+  BlockStartInstruction* GetBlockStart(BasicBlock::RpoNumber rpo) const;
 
   typedef InstructionDeque::const_iterator const_iterator;
   const_iterator begin() const { return instructions_.begin(); }
index ad00f98..3fdf826 100644 (file)
@@ -24,6 +24,7 @@
 #include "src/compiler/machine-operator-reducer.h"
 #include "src/compiler/pipeline-statistics.h"
 #include "src/compiler/register-allocator.h"
+#include "src/compiler/register-allocator-verifier.h"
 #include "src/compiler/schedule.h"
 #include "src/compiler/scheduler.h"
 #include "src/compiler/select-lowering.h"
@@ -575,6 +576,13 @@ Handle<Code> Pipeline::GenerateCode(Linkage* linkage, PipelineData* data) {
     data->pipeline_statistics()->BeginPhaseKind("register allocation");
   }
 
+#ifdef DEBUG
+  // Don't track usage for this zone in compiler stats.
+  Zone verifier_zone(info()->isolate());
+  RegisterAllocatorVerifier verifier(
+      &verifier_zone, RegisterConfiguration::ArchDefault(), &sequence);
+#endif
+
   // Allocate registers.
   Frame frame;
   {
@@ -586,17 +594,14 @@ Handle<Code> Pipeline::GenerateCode(Linkage* linkage, PipelineData* data) {
     ZonePool::Scope zone_scope(data->zone_pool());
 
     SmartArrayPointer<char> debug_name;
-    RegisterAllocator::VerificationType verification_type =
-        RegisterAllocator::kNoVerify;
 #ifdef DEBUG
     debug_name = GetDebugName(info());
-    verification_type = RegisterAllocator::kVerifyAssignment;
 #endif
 
     RegisterAllocator allocator(RegisterConfiguration::ArchDefault(),
                                 zone_scope.zone(), &frame, &sequence,
                                 debug_name.get());
-    if (!allocator.Allocate(data->pipeline_statistics(), verification_type)) {
+    if (!allocator.Allocate(data->pipeline_statistics())) {
       info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
       return Handle<Code>::null();
     }
@@ -614,6 +619,11 @@ Handle<Code> Pipeline::GenerateCode(Linkage* linkage, PipelineData* data) {
        << printable;
   }
 
+#ifdef DEBUG
+  verifier.VerifyAssignment();
+  verifier.VerifyGapMoves();
+#endif
+
   if (data->pipeline_statistics() != NULL) {
     data->pipeline_statistics()->BeginPhaseKind("code generation");
   }
index b8aefe1..c4482a5 100644 (file)
@@ -14,41 +14,78 @@ static size_t OperandCount(const Instruction* instr) {
 }
 
 
+static void VerifyGapEmpty(const GapInstruction* gap) {
+  for (int i = GapInstruction::FIRST_INNER_POSITION;
+       i <= GapInstruction::LAST_INNER_POSITION; i++) {
+    GapInstruction::InnerPosition inner_pos =
+        static_cast<GapInstruction::InnerPosition>(i);
+    CHECK_EQ(NULL, gap->GetParallelMove(inner_pos));
+  }
+}
+
+
+void RegisterAllocatorVerifier::VerifyInput(
+    const OperandConstraint& constraint) {
+  CHECK_NE(kSameAsFirst, constraint.type_);
+  if (constraint.type_ != kImmediate) {
+    CHECK_NE(UnallocatedOperand::kInvalidVirtualRegister,
+             constraint.virtual_register_);
+  }
+}
+
+
+void RegisterAllocatorVerifier::VerifyTemp(
+    const OperandConstraint& constraint) {
+  CHECK_NE(kSameAsFirst, constraint.type_);
+  CHECK_NE(kImmediate, constraint.type_);
+  CHECK_NE(kConstant, constraint.type_);
+  CHECK_EQ(UnallocatedOperand::kInvalidVirtualRegister,
+           constraint.virtual_register_);
+}
+
+
+void RegisterAllocatorVerifier::VerifyOutput(
+    const OperandConstraint& constraint) {
+  CHECK_NE(kImmediate, constraint.type_);
+  CHECK_NE(UnallocatedOperand::kInvalidVirtualRegister,
+           constraint.virtual_register_);
+}
+
+
 RegisterAllocatorVerifier::RegisterAllocatorVerifier(
-    Zone* zone, const InstructionSequence* sequence)
-    : sequence_(sequence), constraints_(zone) {
+    Zone* zone, const RegisterConfiguration* config,
+    const InstructionSequence* sequence)
+    : zone_(zone), config_(config), sequence_(sequence), constraints_(zone) {
   constraints_.reserve(sequence->instructions().size());
+  // TODO(dcarney): model unique constraints.
+  // Construct OperandConstraints for all InstructionOperands, eliminating
+  // kSameAsFirst along the way.
   for (const auto* instr : sequence->instructions()) {
     const size_t operand_count = OperandCount(instr);
     auto* op_constraints =
         zone->NewArray<OperandConstraint>(static_cast<int>(operand_count));
-    // Construct OperandConstraints for all InstructionOperands, eliminating
-    // kSameAsFirst along the way.
     size_t count = 0;
     for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
       BuildConstraint(instr->InputAt(i), &op_constraints[count]);
-      CHECK_NE(kSameAsFirst, op_constraints[count].type_);
+      VerifyInput(op_constraints[count]);
+    }
+    for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
+      BuildConstraint(instr->TempAt(i), &op_constraints[count]);
+      VerifyTemp(op_constraints[count]);
     }
     for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
       BuildConstraint(instr->OutputAt(i), &op_constraints[count]);
       if (op_constraints[count].type_ == kSameAsFirst) {
         CHECK(instr->InputCount() > 0);
-        op_constraints[count] = op_constraints[0];
+        op_constraints[count].type_ = op_constraints[0].type_;
+        op_constraints[count].value_ = op_constraints[0].value_;
       }
-    }
-    for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
-      BuildConstraint(instr->TempAt(i), &op_constraints[count]);
-      CHECK_NE(kSameAsFirst, op_constraints[count].type_);
+      VerifyOutput(op_constraints[count]);
     }
     // All gaps should be totally unallocated at this point.
     if (instr->IsGapMoves()) {
-      const auto* gap = GapInstruction::cast(instr);
-      for (int i = GapInstruction::FIRST_INNER_POSITION;
-           i <= GapInstruction::LAST_INNER_POSITION; i++) {
-        GapInstruction::InnerPosition inner_pos =
-            static_cast<GapInstruction::InnerPosition>(i);
-        CHECK_EQ(NULL, gap->GetParallelMove(inner_pos));
-      }
+      CHECK(operand_count == 0);
+      VerifyGapEmpty(GapInstruction::cast(instr));
     }
     InstructionConstraint instr_constraint = {instr, operand_count,
                                               op_constraints};
@@ -70,12 +107,12 @@ void RegisterAllocatorVerifier::VerifyAssignment() {
     for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
       CheckConstraint(instr->InputAt(i), &op_constraints[count]);
     }
-    for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
-      CheckConstraint(instr->OutputAt(i), &op_constraints[count]);
-    }
     for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
       CheckConstraint(instr->TempAt(i), &op_constraints[count]);
     }
+    for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
+      CheckConstraint(instr->OutputAt(i), &op_constraints[count]);
+    }
     ++instr_it;
   }
 }
@@ -84,9 +121,11 @@ void RegisterAllocatorVerifier::VerifyAssignment() {
 void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
                                                 OperandConstraint* constraint) {
   constraint->value_ = kMinInt;
+  constraint->virtual_register_ = UnallocatedOperand::kInvalidVirtualRegister;
   if (op->IsConstant()) {
     constraint->type_ = kConstant;
     constraint->value_ = ConstantOperand::cast(op)->index();
+    constraint->virtual_register_ = constraint->value_;
   } else if (op->IsImmediate()) {
     constraint->type_ = kImmediate;
     constraint->value_ = ImmediateOperand::cast(op)->index();
@@ -94,6 +133,7 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
     CHECK(op->IsUnallocated());
     const auto* unallocated = UnallocatedOperand::cast(op);
     int vreg = unallocated->virtual_register();
+    constraint->virtual_register_ = vreg;
     if (unallocated->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
       constraint->type_ = kFixedSlot;
       constraint->value_ = unallocated->fixed_slot_index();
@@ -174,6 +214,253 @@ void RegisterAllocatorVerifier::CheckConstraint(
   }
 }
 
+
+class RegisterAllocatorVerifier::OutgoingMapping : public ZoneObject {
+ public:
+  struct OperandLess {
+    bool operator()(const InstructionOperand* a,
+                    const InstructionOperand* b) const {
+      if (a->kind() == b->kind()) return a->index() < b->index();
+      return a->kind() < b->kind();
+    }
+  };
+
+  typedef std::map<
+      const InstructionOperand*, int, OperandLess,
+      zone_allocator<std::pair<const InstructionOperand*, const int>>>
+      LocationMap;
+
+  explicit OutgoingMapping(Zone* zone)
+      : locations_(LocationMap::key_compare(),
+                   LocationMap::allocator_type(zone)),
+        predecessor_intersection_(LocationMap::key_compare(),
+                                  LocationMap::allocator_type(zone)) {}
+
+  LocationMap* locations() { return &locations_; }
+
+  void RunPhis(const InstructionSequence* sequence,
+               const InstructionBlock* block, size_t phi_index) {
+    // This operation is only valid in edge split form.
+    size_t predecessor_index = block->predecessors()[phi_index].ToSize();
+    CHECK(sequence->instruction_blocks()[predecessor_index]->SuccessorCount() ==
+          1);
+    const auto* gap = sequence->GetBlockStart(block->rpo_number());
+    // The first moves in the BlockStartInstruction are the phi moves inserted
+    // by ResolvePhis.
+    const ParallelMove* move = gap->GetParallelMove(GapInstruction::START);
+    CHECK_NE(nullptr, move);
+    const auto* move_ops = move->move_operands();
+    CHECK(block->phis().size() <= static_cast<size_t>(move_ops->length()));
+    auto move_it = move_ops->begin();
+    for (const auto* phi : block->phis()) {
+      const auto* op = move_it->source();
+      auto it = locations()->find(op);
+      CHECK(it != locations()->end());
+      CHECK_EQ(it->second, phi->operands()[phi_index]);
+      it->second = phi->virtual_register();
+      ++move_it;
+    }
+  }
+
+  void RunGapInstruction(Zone* zone, const GapInstruction* gap) {
+    for (int i = GapInstruction::FIRST_INNER_POSITION;
+         i <= GapInstruction::LAST_INNER_POSITION; i++) {
+      GapInstruction::InnerPosition inner_pos =
+          static_cast<GapInstruction::InnerPosition>(i);
+      const ParallelMove* move = gap->GetParallelMove(inner_pos);
+      if (move == nullptr) continue;
+      RunParallelMoves(zone, move);
+    }
+  }
+
+  void RunParallelMoves(Zone* zone, const ParallelMove* move) {
+    // Compute outgoing mappings.
+    LocationMap to_insert((LocationMap::key_compare()),
+                          LocationMap::allocator_type(zone));
+    auto* moves = move->move_operands();
+    for (auto i = moves->begin(); i != moves->end(); ++i) {
+      if (i->IsEliminated()) continue;
+      CHECK(i->source()->kind() != InstructionOperand::INVALID);
+      auto cur = locations()->find(i->source());
+      CHECK(cur != locations()->end());
+      if (i->destination()->kind() == InstructionOperand::INVALID) continue;
+      to_insert.insert(std::make_pair(i->destination(), cur->second));
+    }
+    // Drop current mappings.
+    for (auto i = moves->begin(); i != moves->end(); ++i) {
+      if (i->IsEliminated()) continue;
+      auto cur = locations()->find(i->destination());
+      if (cur != locations()->end()) locations()->erase(cur);
+    }
+    // Insert new values.
+    locations()->insert(to_insert.begin(), to_insert.end());
+  }
+
+  void Map(const InstructionOperand* op, int virtual_register) {
+    locations()->insert(std::make_pair(op, virtual_register));
+  }
+
+  void Drop(const InstructionOperand* op) {
+    auto it = locations()->find(op);
+    if (it != locations()->end()) locations()->erase(it);
+  }
+
+  void DropRegisters(const RegisterConfiguration* config) {
+    for (int i = 0; i < config->num_general_registers(); ++i) {
+      InstructionOperand op(InstructionOperand::REGISTER, i);
+      Drop(&op);
+    }
+    for (int i = 0; i < config->num_double_registers(); ++i) {
+      InstructionOperand op(InstructionOperand::DOUBLE_REGISTER, i);
+      Drop(&op);
+    }
+  }
+
+  void InitializeFromFirstPredecessor(const InstructionSequence* sequence,
+                                      const OutgoingMappings* outgoing_mappings,
+                                      const InstructionBlock* block) {
+    if (block->predecessors().empty()) return;
+    size_t predecessor_index = block->predecessors()[0].ToSize();
+    CHECK(predecessor_index < block->rpo_number().ToSize());
+    auto* incoming = outgoing_mappings->at(predecessor_index);
+    if (block->PredecessorCount() > 1) {
+      // Update incoming map with phis. The remaining phis will be checked later
+      // as their mappings are not guaranteed to exist yet.
+      incoming->RunPhis(sequence, block, 0);
+    }
+    // Now initialize outgoing mapping for this block with incoming mapping.
+    CHECK(locations_.empty());
+    locations_ = incoming->locations_;
+  }
+
+  void InitializeFromIntersection() { locations_ = predecessor_intersection_; }
+
+  void InitializeIntersection(const OutgoingMapping* incoming) {
+    CHECK(predecessor_intersection_.empty());
+    predecessor_intersection_ = incoming->locations_;
+  }
+
+  void Intersect(const OutgoingMapping* other) {
+    if (predecessor_intersection_.empty()) return;
+    auto it = predecessor_intersection_.begin();
+    OperandLess less;
+    for (const auto& o : other->locations_) {
+      while (less(it->first, o.first)) {
+        ++it;
+        if (it == predecessor_intersection_.end()) return;
+      }
+      if (it->first->Equals(o.first)) {
+        if (o.second != it->second) {
+          predecessor_intersection_.erase(it++);
+        } else {
+          ++it;
+        }
+        if (it == predecessor_intersection_.end()) return;
+      }
+    }
+  }
+
+ private:
+  LocationMap locations_;
+  LocationMap predecessor_intersection_;
+
+  DISALLOW_COPY_AND_ASSIGN(OutgoingMapping);
+};
+
+
+// Verify that all gap moves move the operands for a virtual register into the
+// correct location for every instruction.
+void RegisterAllocatorVerifier::VerifyGapMoves() {
+  typedef ZoneVector<OutgoingMapping*> OutgoingMappings;
+  OutgoingMappings outgoing_mappings(
+      static_cast<int>(sequence()->instruction_blocks().size()), nullptr,
+      zone());
+  // Construct all mappings, ignoring back edges and multiple entries.
+  ConstructOutgoingMappings(&outgoing_mappings, true);
+  // Run all remaining phis and compute the intersection of all predecessor
+  // mappings.
+  for (const auto* block : sequence()->instruction_blocks()) {
+    if (block->PredecessorCount() == 0) continue;
+    const size_t block_index = block->rpo_number().ToSize();
+    auto* mapping = outgoing_mappings[block_index];
+    bool initialized = false;
+    // Walk predecessors in reverse so that Intersect is exercised correctly:
+    // if it did nothing, the second pass would simply repeat what the first
+    // pass did.
+    for (size_t phi_input = block->PredecessorCount() - 1; true; --phi_input) {
+      const size_t pred_block_index = block->predecessors()[phi_input].ToSize();
+      auto* incoming = outgoing_mappings[pred_block_index];
+      if (phi_input != 0) incoming->RunPhis(sequence(), block, phi_input);
+      if (!initialized) {
+        mapping->InitializeIntersection(incoming);
+        initialized = true;
+      } else {
+        mapping->Intersect(incoming);
+      }
+      if (phi_input == 0) break;
+    }
+  }
+  // Construct all mappings again, this time using the intersection mapping
+  // above as the incoming mapping instead of the result from the first
+  // predecessor.
+  ConstructOutgoingMappings(&outgoing_mappings, false);
+}
+
+
+void RegisterAllocatorVerifier::ConstructOutgoingMappings(
+    OutgoingMappings* outgoing_mappings, bool initial_pass) {
+  // Compute the locations of all virtual registers leaving every block, using
+  // only the first predecessor as source for the input mapping.
+  for (const auto* block : sequence()->instruction_blocks()) {
+    const size_t block_index = block->rpo_number().ToSize();
+    auto* current = outgoing_mappings->at(block_index);
+    CHECK(initial_pass == (current == nullptr));
+    // Initialize current.
+    if (!initial_pass) {
+      // Skip check second time around for blocks without multiple predecessors
+      // as we have already executed this in the initial run.
+      if (block->PredecessorCount() <= 1) continue;
+      current->InitializeFromIntersection();
+    } else {
+      current = new (zone()) OutgoingMapping(zone());
+      outgoing_mappings->at(block_index) = current;
+      // Copy outgoing values from predecessor block.
+      current->InitializeFromFirstPredecessor(sequence(), outgoing_mappings,
+                                              block);
+    }
+    // Update current with gaps and operands for all instructions in block.
+    for (int instr_index = block->code_start(); instr_index < block->code_end();
+         ++instr_index) {
+      const auto& instr_constraint = constraints_[instr_index];
+      const auto* instr = instr_constraint.instruction_;
+      const auto* op_constraints = instr_constraint.operand_constraints_;
+      size_t count = 0;
+      for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
+        if (op_constraints[count].type_ == kImmediate) continue;
+        auto it = current->locations()->find(instr->InputAt(i));
+        int virtual_register = op_constraints[count].virtual_register_;
+        CHECK(it != current->locations()->end());
+        CHECK_EQ(it->second, virtual_register);
+      }
+      for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
+        current->Drop(instr->TempAt(i));
+      }
+      if (instr->IsCall()) {
+        current->DropRegisters(config());
+      }
+      for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
+        current->Drop(instr->OutputAt(i));
+        int virtual_register = op_constraints[count].virtual_register_;
+        current->Map(instr->OutputAt(i), virtual_register);
+      }
+      if (instr->IsGapMoves()) {
+        const auto* gap = GapInstruction::cast(instr);
+        current->RunGapInstruction(zone(), gap);
+      }
+    }
+  }
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
index 10592e1..4e35dc2 100644 (file)
@@ -17,9 +17,11 @@ class InstructionSequence;
 
 class RegisterAllocatorVerifier FINAL : public ZoneObject {
  public:
-  RegisterAllocatorVerifier(Zone* zone, const InstructionSequence* sequence);
+  RegisterAllocatorVerifier(Zone* zone, const RegisterConfiguration* config,
+                            const InstructionSequence* sequence);
 
   void VerifyAssignment();
+  void VerifyGapMoves();
 
  private:
   enum ConstraintType {
@@ -38,6 +40,7 @@ class RegisterAllocatorVerifier FINAL : public ZoneObject {
   struct OperandConstraint {
     ConstraintType type_;
     int value_;  // subkind index when relevant
+    int virtual_register_;
   };
 
   struct InstructionConstraint {
@@ -46,15 +49,30 @@ class RegisterAllocatorVerifier FINAL : public ZoneObject {
     OperandConstraint* operand_constraints_;
   };
 
+  class OutgoingMapping;
+
   typedef ZoneVector<InstructionConstraint> Constraints;
+  typedef ZoneVector<OutgoingMapping*> OutgoingMappings;
 
+  Zone* zone() const { return zone_; }
+  const RegisterConfiguration* config() { return config_; }
   const InstructionSequence* sequence() const { return sequence_; }
   Constraints* constraints() { return &constraints_; }
+
+  static void VerifyInput(const OperandConstraint& constraint);
+  static void VerifyTemp(const OperandConstraint& constraint);
+  static void VerifyOutput(const OperandConstraint& constraint);
+
   void BuildConstraint(const InstructionOperand* op,
                        OperandConstraint* constraint);
   void CheckConstraint(const InstructionOperand* op,
                        const OperandConstraint* constraint);
 
+  void ConstructOutgoingMappings(OutgoingMappings* outgoing_mappings,
+                                 bool initial_pass);
+
+  Zone* const zone_;
+  const RegisterConfiguration* config_;
   const InstructionSequence* const sequence_;
   Constraints constraints_;
 
index 0069df8..23a7df6 100644 (file)
@@ -5,7 +5,6 @@
 #include "src/compiler/linkage.h"
 #include "src/compiler/pipeline-statistics.h"
 #include "src/compiler/register-allocator.h"
-#include "src/compiler/register-allocator-verifier.h"
 #include "src/string-stream.h"
 
 namespace v8 {
@@ -1117,16 +1116,7 @@ void RegisterAllocator::ResolvePhis(const InstructionBlock* block) {
 }
 
 
-bool RegisterAllocator::Allocate(PipelineStatistics* stats,
-                                 VerificationType verification_type) {
-  SmartPointer<Zone> verifier_zone;
-  RegisterAllocatorVerifier* verifier = NULL;
-  if (verification_type == kVerifyAssignment) {
-    // Don't track usage for this zone in compiler stats.
-    verifier_zone.Reset(new Zone(local_zone()->isolate()));
-    verifier = new (verifier_zone.get())
-        RegisterAllocatorVerifier(verifier_zone.get(), code());
-  }
+bool RegisterAllocator::Allocate(PipelineStatistics* stats) {
   assigned_registers_ = new (code_zone())
       BitVector(config()->num_general_registers(), code_zone());
   assigned_double_registers_ = new (code_zone())
@@ -1168,9 +1158,6 @@ bool RegisterAllocator::Allocate(PipelineStatistics* stats,
   }
   frame()->SetAllocatedRegisters(assigned_registers_);
   frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
-  if (verifier != NULL) {
-    verifier->VerifyAssignment();
-  }
   return true;
 }
 
index 690957d..a6578af 100644 (file)
@@ -322,15 +322,12 @@ class LiveRange FINAL : public ZoneObject {
 
 class RegisterAllocator FINAL {
  public:
-  enum VerificationType { kNoVerify, kVerifyAssignment };
-
   explicit RegisterAllocator(const RegisterConfiguration* config,
                              Zone* local_zone, Frame* frame,
                              InstructionSequence* code,
                              const char* debug_name = nullptr);
 
-  bool Allocate(PipelineStatistics* stats = NULL,
-                VerificationType verification_type = kNoVerify);
+  bool Allocate(PipelineStatistics* stats = NULL);
   bool AllocationOk() { return allocation_ok_; }
   BitVector* assigned_registers() { return assigned_registers_; }
   BitVector* assigned_double_registers() { return assigned_double_registers_; }
index 9eb6753..fb2fabb 100644 (file)
@@ -129,36 +129,6 @@ TEST(RunBranch) {
 }
 
 
-TEST(RunRedundantBranch1) {
-  RawMachineAssemblerTester<int32_t> m;
-  int constant = 944777;
-
-  MLabel blocka;
-  m.Branch(m.Int32Constant(0), &blocka, &blocka);
-  m.Bind(&blocka);
-  m.Return(m.Int32Constant(constant));
-
-  CHECK_EQ(constant, m.Call());
-}
-
-
-TEST(RunRedundantBranch2) {
-  RawMachineAssemblerTester<int32_t> m;
-  int constant = 966777;
-
-  MLabel blocka, blockb, blockc;
-  m.Branch(m.Int32Constant(0), &blocka, &blockc);
-  m.Bind(&blocka);
-  m.Branch(m.Int32Constant(0), &blockb, &blockb);
-  m.Bind(&blockc);
-  m.Goto(&blockb);
-  m.Bind(&blockb);
-  m.Return(m.Int32Constant(constant));
-
-  CHECK_EQ(constant, m.Call());
-}
-
-
 TEST(RunDiamond2) {
   RawMachineAssemblerTester<int32_t> m;
 
index dbcdedb..db457c0 100644 (file)
@@ -4,6 +4,7 @@
 
 #include "src/base/utils/random-number-generator.h"
 #include "src/compiler/register-allocator.h"
+#include "src/compiler/register-allocator-verifier.h"
 #include "test/unittests/test-utils.h"
 #include "testing/gmock/include/gmock/gmock.h"
 
@@ -37,7 +38,7 @@ static void InitializeRegisterNames() {
   }
 }
 
-enum BlockCompletionType { kFallThrough, kBranch, kJump };
+enum BlockCompletionType { kBlockEnd, kFallThrough, kBranch, kJump };
 
 struct BlockCompletion {
   BlockCompletionType type_;
@@ -77,7 +78,8 @@ class RegisterAllocatorTest : public TestWithZone {
         num_double_registers_(kDefaultNRegs),
         instruction_blocks_(zone()),
         current_block_(nullptr),
-        is_last_block_(false) {
+        is_last_block_(false),
+        block_returns_(false) {
     InitializeRegisterNames();
   }
 
@@ -142,21 +144,29 @@ class RegisterAllocatorTest : public TestWithZone {
 
   void StartBlock() {
     CHECK(!is_last_block_);
+    block_returns_ = false;
     NewBlock();
   }
 
   void EndBlock(BlockCompletion completion = FallThrough()) {
     completions_.push_back(completion);
     switch (completion.type_) {
+      case kBlockEnd:
+        CHECK(false);  // unreachable
+        break;
       case kFallThrough:
-        if (is_last_block_) break;
-        // TODO(dcarney): we don't emit this after returns.
+        if (is_last_block_ || block_returns_) {
+          completions_.back().type_ = kBlockEnd;
+          break;
+        }
         EmitFallThrough();
         break;
       case kJump:
+        CHECK(!block_returns_);
         EmitJump();
         break;
       case kBranch:
+        CHECK(!block_returns_);
         EmitBranch(completion.vreg_);
         break;
     }
@@ -169,6 +179,7 @@ class RegisterAllocatorTest : public TestWithZone {
     CHECK_EQ(nullptr, current_block_);
     CHECK(is_last_block_);
     WireBlocks();
+    RegisterAllocatorVerifier verifier(zone(), config(), sequence());
     if (FLAG_trace_alloc || FLAG_trace_turbo) {
       OFStream os(stdout);
       PrintableInstructionSequence printable = {config(), sequence()};
@@ -180,6 +191,8 @@ class RegisterAllocatorTest : public TestWithZone {
       PrintableInstructionSequence printable = {config(), sequence()};
       os << "After: " << std::endl << printable << std::endl;
     }
+    verifier.VerifyAssignment();
+    verifier.VerifyGapMoves();
   }
 
   int NewReg() { return sequence()->NextVirtualRegister(); }
@@ -192,6 +205,7 @@ class RegisterAllocatorTest : public TestWithZone {
   }
 
   Instruction* Return(int vreg) {
+    block_returns_ = true;
     InstructionOperand* inputs[1]{UseRegister(vreg)};
     return Emit(kArchRet, 0, nullptr, 1, inputs);
   }
@@ -344,12 +358,11 @@ class RegisterAllocatorTest : public TestWithZone {
   void WireBlocks() {
     CHECK(instruction_blocks_.size() == completions_.size());
     size_t offset = 0;
-    size_t size = instruction_blocks_.size();
     for (const auto& completion : completions_) {
       switch (completion.type_) {
-        case kFallThrough:
-          if (offset == size - 1) break;
-        // Fallthrough.
+        case kBlockEnd:
+          break;
+        case kFallThrough:  // Fallthrough.
         case kJump:
           WireBlock(offset, completion.offset_0_);
           break;
@@ -393,6 +406,7 @@ class RegisterAllocatorTest : public TestWithZone {
   LoopBlocks loop_blocks_;
   InstructionBlock* current_block_;
   bool is_last_block_;
+  bool block_returns_;
 };