Revert of [turbofan] add MachineType to AllocatedOperand (patchset #17 id:310001...
author    machenbach <machenbach@chromium.org>
          Wed, 29 Apr 2015 18:28:38 +0000 (11:28 -0700)
committer Commit bot <commit-bot@chromium.org>
          Wed, 29 Apr 2015 18:28:47 +0000 (18:28 +0000)
Reason for revert:
[Sheriff] Breaks compile on chromium asan and v8 msan:
http://build.chromium.org/p/client.v8/builders/Linux%20ASAN%20Builder/builds/3446
http://build.chromium.org/p/client.v8/builders/V8%20Linux%20-%20arm64%20-%20sim%20-%20MSAN/builds/2085

Original issue's description:
> [turbofan] add MachineType to AllocatedOperand
>
> - allows the optimization of emitted gap move code since the representation of the value in the register is known
> - necessary preparation for vector register allocation
> - prepare for slot sharing for any value of the same byte width
>
> BUG=
>
> Committed: https://crrev.com/3a025d1ab6437559f86a464767aa03d2d9789f6f
> Cr-Commit-Position: refs/heads/master@{#28137}

TBR=jarin@chromium.org,dcarney@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=

Review URL: https://codereview.chromium.org/1119483003

Cr-Commit-Position: refs/heads/master@{#28139}
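
To make the original CL's first bullet concrete ("optimization of emitted gap move code since the representation of the value in the register is known"): once an allocated operand carries its MachineType, a gap move can be lowered to a width- and register-class-appropriate move from the operand alone. The following is a minimal illustrative sketch, not V8 code; the enum values and mnemonics are assumptions chosen only to show the idea.

#include <iostream>

// Illustrative stand-ins for the representations the reverted CL attached to
// AllocatedOperand (see the instruction.h diff further down).
enum class Rep { kWord32, kWord64, kFloat32, kFloat64, kTagged };

// With the representation on the operand itself, the gap resolver can pick the
// right move flavour without a side lookup (mnemonics are x64-flavoured
// examples only).
const char* MoveFor(Rep rep, bool between_registers) {
  switch (rep) {
    case Rep::kFloat32: return between_registers ? "movss xmm, xmm" : "movss xmm, [slot]";
    case Rep::kFloat64: return between_registers ? "movsd xmm, xmm" : "movsd xmm, [slot]";
    case Rep::kWord32:  return between_registers ? "mov r32, r32"   : "mov r32, [slot]";
    default:            return between_registers ? "mov r64, r64"   : "mov r64, [slot]";
  }
}

int main() {
  std::cout << MoveFor(Rep::kFloat64, true) << "\n";   // movsd xmm, xmm
  std::cout << MoveFor(Rep::kWord32, false) << "\n";   // mov r32, [slot]
}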

16 files changed:
src/compiler/frame.h
src/compiler/gap-resolver.cc
src/compiler/instruction-selector-impl.h
src/compiler/instruction-selector.cc
src/compiler/instruction-selector.h
src/compiler/instruction.cc
src/compiler/instruction.h
src/compiler/move-optimizer.cc
src/compiler/register-allocator-verifier.cc
src/compiler/register-allocator.cc
src/compiler/register-allocator.h
test/cctest/compiler/test-gap-resolver.cc
test/cctest/compiler/test-instruction.cc
test/cctest/compiler/test-jump-threading.cc
test/unittests/compiler/instruction-selector-unittest.cc
test/unittests/compiler/move-optimizer-unittest.cc

src/compiler/frame.h
index 2850a8c..a5dd116 100644
@@ -20,11 +20,13 @@ class Frame : public ZoneObject {
   Frame()
       : register_save_area_size_(0),
         spill_slot_count_(0),
+        double_spill_slot_count_(0),
         osr_stack_slot_count_(0),
         allocated_registers_(NULL),
         allocated_double_registers_(NULL) {}
 
   inline int GetSpillSlotCount() { return spill_slot_count_; }
+  inline int GetDoubleSpillSlotCount() { return double_spill_slot_count_; }
 
   void SetAllocatedRegisters(BitVector* regs) {
     DCHECK(allocated_registers_ == NULL);
@@ -55,13 +57,15 @@ class Frame : public ZoneObject {
 
   int GetOsrStackSlotCount() { return osr_stack_slot_count_; }
 
-  int AllocateSpillSlot(int width) {
-    DCHECK(width == 4 || width == 8);
-    // Skip one slot if necessary.
-    if (width > kPointerSize) {
-      DCHECK(width == kPointerSize * 2);
-      spill_slot_count_++;
-      spill_slot_count_ |= 1;
+  int AllocateSpillSlot(bool is_double) {
+    // If 32-bit, skip one if the new slot is a double.
+    if (is_double) {
+      if (kDoubleSize > kPointerSize) {
+        DCHECK(kDoubleSize == kPointerSize * 2);
+        spill_slot_count_++;
+        spill_slot_count_ |= 1;
+      }
+      double_spill_slot_count_++;
     }
     return spill_slot_count_++;
   }
@@ -74,6 +78,7 @@ class Frame : public ZoneObject {
  private:
   int register_save_area_size_;
   int spill_slot_count_;
+  int double_spill_slot_count_;
   int osr_stack_slot_count_;
   BitVector* allocated_registers_;
   BitVector* allocated_double_registers_;
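
A standalone sketch of the restored Frame::AllocateSpillSlot(bool is_double) above, assuming a 32-bit target (kPointerSize == 4, kDoubleSize == 8): it skips a slot when needed so the index handed out for a double is odd, mirroring the "Skip one slot if necessary" / "If 32-bit, skip one" comments in the diff. Not V8 code; the struct and constants are simplified stand-ins.

#include <cassert>
#include <iostream>

// Stand-ins for the constants used in frame.h; values assume a 32-bit target.
constexpr int kPointerSize = 4;
constexpr int kDoubleSize = 8;

struct FrameSketch {
  int spill_slot_count_ = 0;
  int double_spill_slot_count_ = 0;

  // Mirrors the restored Frame::AllocateSpillSlot(bool is_double) above.
  int AllocateSpillSlot(bool is_double) {
    if (is_double) {
      if (kDoubleSize > kPointerSize) {
        assert(kDoubleSize == kPointerSize * 2);
        spill_slot_count_++;
        spill_slot_count_ |= 1;
      }
      double_spill_slot_count_++;
    }
    return spill_slot_count_++;
  }
};

int main() {
  FrameSketch frame;
  std::cout << frame.AllocateSpillSlot(false) << "\n";  // 0
  std::cout << frame.AllocateSpillSlot(true) << "\n";   // 3 (odd index for the double)
  std::cout << frame.AllocateSpillSlot(false) << "\n";  // 4
}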
src/compiler/gap-resolver.cc
index e5de737..d9b20b6 100644
@@ -75,7 +75,7 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) const {
   // This move's source may have changed due to swaps to resolve cycles and so
   // it may now be the last move in the cycle.  If so remove it.
   InstructionOperand source = move->source();
-  if (source.EqualsModuloType(destination)) {
+  if (source == destination) {
     move->Eliminate();
     return;
   }
src/compiler/instruction-selector-impl.h
index b34a914..f4acfa0 100644
@@ -137,7 +137,7 @@ class OperandGenerator {
     UnallocatedOperand op = UnallocatedOperand(
         UnallocatedOperand::MUST_HAVE_REGISTER,
         UnallocatedOperand::USED_AT_START, sequence()->NextVirtualRegister());
-    sequence()->MarkAsRepresentation(kRepFloat64, op.virtual_register());
+    sequence()->MarkAsDouble(op.virtual_register());
     return op;
   }
 
src/compiler/instruction-selector.cc
index 3d6c1fe..1e2ab97 100644
@@ -241,17 +241,70 @@ void InstructionSelector::MarkAsUsed(Node* node) {
 }
 
 
+bool InstructionSelector::IsDouble(const Node* node) const {
+  DCHECK_NOT_NULL(node);
+  int const virtual_register = virtual_registers_[node->id()];
+  if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
+    return false;
+  }
+  return sequence()->IsDouble(virtual_register);
+}
+
+
+void InstructionSelector::MarkAsDouble(Node* node) {
+  DCHECK_NOT_NULL(node);
+  DCHECK(!IsReference(node));
+  sequence()->MarkAsDouble(GetVirtualRegister(node));
+}
+
+
+bool InstructionSelector::IsReference(const Node* node) const {
+  DCHECK_NOT_NULL(node);
+  int const virtual_register = virtual_registers_[node->id()];
+  if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
+    return false;
+  }
+  return sequence()->IsReference(virtual_register);
+}
+
+
+void InstructionSelector::MarkAsReference(Node* node) {
+  DCHECK_NOT_NULL(node);
+  DCHECK(!IsDouble(node));
+  sequence()->MarkAsReference(GetVirtualRegister(node));
+}
+
+
 void InstructionSelector::MarkAsRepresentation(MachineType rep,
                                                const InstructionOperand& op) {
   UnallocatedOperand unalloc = UnallocatedOperand::cast(op);
-  rep = RepresentationOf(rep);
-  sequence()->MarkAsRepresentation(rep, unalloc.virtual_register());
+  switch (RepresentationOf(rep)) {
+    case kRepFloat32:
+    case kRepFloat64:
+      sequence()->MarkAsDouble(unalloc.virtual_register());
+      break;
+    case kRepTagged:
+      sequence()->MarkAsReference(unalloc.virtual_register());
+      break;
+    default:
+      break;
+  }
 }
 
 
 void InstructionSelector::MarkAsRepresentation(MachineType rep, Node* node) {
-  rep = RepresentationOf(rep);
-  sequence()->MarkAsRepresentation(rep, GetVirtualRegister(node));
+  DCHECK_NOT_NULL(node);
+  switch (RepresentationOf(rep)) {
+    case kRepFloat32:
+    case kRepFloat64:
+      MarkAsDouble(node);
+      break;
+    case kRepTagged:
+      MarkAsReference(node);
+      break;
+    default:
+      break;
+  }
 }
 
 
@@ -572,9 +625,9 @@ void InstructionSelector::VisitNode(Node* node) {
     case IrOpcode::kExternalConstant:
       return VisitConstant(node);
     case IrOpcode::kFloat32Constant:
-      return MarkAsFloat32(node), VisitConstant(node);
+      return MarkAsDouble(node), VisitConstant(node);
     case IrOpcode::kFloat64Constant:
-      return MarkAsFloat64(node), VisitConstant(node);
+      return MarkAsDouble(node), VisitConstant(node);
     case IrOpcode::kHeapConstant:
       return MarkAsReference(node), VisitConstant(node);
     case IrOpcode::kNumberConstant: {
@@ -595,125 +648,125 @@ void InstructionSelector::VisitNode(Node* node) {
     case IrOpcode::kStore:
       return VisitStore(node);
     case IrOpcode::kWord32And:
-      return MarkAsWord32(node), VisitWord32And(node);
+      return VisitWord32And(node);
     case IrOpcode::kWord32Or:
-      return MarkAsWord32(node), VisitWord32Or(node);
+      return VisitWord32Or(node);
     case IrOpcode::kWord32Xor:
-      return MarkAsWord32(node), VisitWord32Xor(node);
+      return VisitWord32Xor(node);
     case IrOpcode::kWord32Shl:
-      return MarkAsWord32(node), VisitWord32Shl(node);
+      return VisitWord32Shl(node);
     case IrOpcode::kWord32Shr:
-      return MarkAsWord32(node), VisitWord32Shr(node);
+      return VisitWord32Shr(node);
     case IrOpcode::kWord32Sar:
-      return MarkAsWord32(node), VisitWord32Sar(node);
+      return VisitWord32Sar(node);
     case IrOpcode::kWord32Ror:
-      return MarkAsWord32(node), VisitWord32Ror(node);
+      return VisitWord32Ror(node);
     case IrOpcode::kWord32Equal:
       return VisitWord32Equal(node);
     case IrOpcode::kWord32Clz:
-      return MarkAsWord32(node), VisitWord32Clz(node);
+      return VisitWord32Clz(node);
     case IrOpcode::kWord64And:
-      return MarkAsWord64(node), VisitWord64And(node);
+      return VisitWord64And(node);
     case IrOpcode::kWord64Or:
-      return MarkAsWord64(node), VisitWord64Or(node);
+      return VisitWord64Or(node);
     case IrOpcode::kWord64Xor:
-      return MarkAsWord64(node), VisitWord64Xor(node);
+      return VisitWord64Xor(node);
     case IrOpcode::kWord64Shl:
-      return MarkAsWord64(node), VisitWord64Shl(node);
+      return VisitWord64Shl(node);
     case IrOpcode::kWord64Shr:
-      return MarkAsWord64(node), VisitWord64Shr(node);
+      return VisitWord64Shr(node);
     case IrOpcode::kWord64Sar:
-      return MarkAsWord64(node), VisitWord64Sar(node);
+      return VisitWord64Sar(node);
     case IrOpcode::kWord64Ror:
-      return MarkAsWord64(node), VisitWord64Ror(node);
+      return VisitWord64Ror(node);
     case IrOpcode::kWord64Equal:
       return VisitWord64Equal(node);
     case IrOpcode::kInt32Add:
-      return MarkAsWord32(node), VisitInt32Add(node);
+      return VisitInt32Add(node);
     case IrOpcode::kInt32AddWithOverflow:
-      return MarkAsWord32(node), VisitInt32AddWithOverflow(node);
+      return VisitInt32AddWithOverflow(node);
     case IrOpcode::kInt32Sub:
-      return MarkAsWord32(node), VisitInt32Sub(node);
+      return VisitInt32Sub(node);
     case IrOpcode::kInt32SubWithOverflow:
       return VisitInt32SubWithOverflow(node);
     case IrOpcode::kInt32Mul:
-      return MarkAsWord32(node), VisitInt32Mul(node);
+      return VisitInt32Mul(node);
     case IrOpcode::kInt32MulHigh:
       return VisitInt32MulHigh(node);
     case IrOpcode::kInt32Div:
-      return MarkAsWord32(node), VisitInt32Div(node);
+      return VisitInt32Div(node);
     case IrOpcode::kInt32Mod:
-      return MarkAsWord32(node), VisitInt32Mod(node);
+      return VisitInt32Mod(node);
     case IrOpcode::kInt32LessThan:
       return VisitInt32LessThan(node);
     case IrOpcode::kInt32LessThanOrEqual:
       return VisitInt32LessThanOrEqual(node);
     case IrOpcode::kUint32Div:
-      return MarkAsWord32(node), VisitUint32Div(node);
+      return VisitUint32Div(node);
     case IrOpcode::kUint32LessThan:
       return VisitUint32LessThan(node);
     case IrOpcode::kUint32LessThanOrEqual:
       return VisitUint32LessThanOrEqual(node);
     case IrOpcode::kUint32Mod:
-      return MarkAsWord32(node), VisitUint32Mod(node);
+      return VisitUint32Mod(node);
     case IrOpcode::kUint32MulHigh:
       return VisitUint32MulHigh(node);
     case IrOpcode::kInt64Add:
-      return MarkAsWord64(node), VisitInt64Add(node);
+      return VisitInt64Add(node);
     case IrOpcode::kInt64Sub:
-      return MarkAsWord64(node), VisitInt64Sub(node);
+      return VisitInt64Sub(node);
     case IrOpcode::kInt64Mul:
-      return MarkAsWord64(node), VisitInt64Mul(node);
+      return VisitInt64Mul(node);
     case IrOpcode::kInt64Div:
-      return MarkAsWord64(node), VisitInt64Div(node);
+      return VisitInt64Div(node);
     case IrOpcode::kInt64Mod:
-      return MarkAsWord64(node), VisitInt64Mod(node);
+      return VisitInt64Mod(node);
     case IrOpcode::kInt64LessThan:
       return VisitInt64LessThan(node);
     case IrOpcode::kInt64LessThanOrEqual:
       return VisitInt64LessThanOrEqual(node);
     case IrOpcode::kUint64Div:
-      return MarkAsWord64(node), VisitUint64Div(node);
+      return VisitUint64Div(node);
     case IrOpcode::kUint64LessThan:
       return VisitUint64LessThan(node);
     case IrOpcode::kUint64Mod:
-      return MarkAsWord64(node), VisitUint64Mod(node);
+      return VisitUint64Mod(node);
     case IrOpcode::kChangeFloat32ToFloat64:
-      return MarkAsFloat64(node), VisitChangeFloat32ToFloat64(node);
+      return MarkAsDouble(node), VisitChangeFloat32ToFloat64(node);
     case IrOpcode::kChangeInt32ToFloat64:
-      return MarkAsFloat64(node), VisitChangeInt32ToFloat64(node);
+      return MarkAsDouble(node), VisitChangeInt32ToFloat64(node);
     case IrOpcode::kChangeUint32ToFloat64:
-      return MarkAsFloat64(node), VisitChangeUint32ToFloat64(node);
+      return MarkAsDouble(node), VisitChangeUint32ToFloat64(node);
     case IrOpcode::kChangeFloat64ToInt32:
-      return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
+      return VisitChangeFloat64ToInt32(node);
     case IrOpcode::kChangeFloat64ToUint32:
-      return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
+      return VisitChangeFloat64ToUint32(node);
     case IrOpcode::kChangeInt32ToInt64:
-      return MarkAsWord64(node), VisitChangeInt32ToInt64(node);
+      return VisitChangeInt32ToInt64(node);
     case IrOpcode::kChangeUint32ToUint64:
-      return MarkAsWord64(node), VisitChangeUint32ToUint64(node);
+      return VisitChangeUint32ToUint64(node);
     case IrOpcode::kTruncateFloat64ToFloat32:
-      return MarkAsFloat32(node), VisitTruncateFloat64ToFloat32(node);
+      return MarkAsDouble(node), VisitTruncateFloat64ToFloat32(node);
     case IrOpcode::kTruncateFloat64ToInt32:
-      return MarkAsWord32(node), VisitTruncateFloat64ToInt32(node);
+      return VisitTruncateFloat64ToInt32(node);
     case IrOpcode::kTruncateInt64ToInt32:
-      return MarkAsWord32(node), VisitTruncateInt64ToInt32(node);
+      return VisitTruncateInt64ToInt32(node);
     case IrOpcode::kFloat32Add:
-      return MarkAsFloat32(node), VisitFloat32Add(node);
+      return MarkAsDouble(node), VisitFloat32Add(node);
     case IrOpcode::kFloat32Sub:
-      return MarkAsFloat32(node), VisitFloat32Sub(node);
+      return MarkAsDouble(node), VisitFloat32Sub(node);
     case IrOpcode::kFloat32Mul:
-      return MarkAsFloat32(node), VisitFloat32Mul(node);
+      return MarkAsDouble(node), VisitFloat32Mul(node);
     case IrOpcode::kFloat32Div:
-      return MarkAsFloat32(node), VisitFloat32Div(node);
+      return MarkAsDouble(node), VisitFloat32Div(node);
     case IrOpcode::kFloat32Min:
-      return MarkAsFloat32(node), VisitFloat32Min(node);
+      return MarkAsDouble(node), VisitFloat32Min(node);
     case IrOpcode::kFloat32Max:
-      return MarkAsFloat32(node), VisitFloat32Max(node);
+      return MarkAsDouble(node), VisitFloat32Max(node);
     case IrOpcode::kFloat32Abs:
-      return MarkAsFloat32(node), VisitFloat32Abs(node);
+      return MarkAsDouble(node), VisitFloat32Abs(node);
     case IrOpcode::kFloat32Sqrt:
-      return MarkAsFloat32(node), VisitFloat32Sqrt(node);
+      return MarkAsDouble(node), VisitFloat32Sqrt(node);
     case IrOpcode::kFloat32Equal:
       return VisitFloat32Equal(node);
     case IrOpcode::kFloat32LessThan:
@@ -721,23 +774,23 @@ void InstructionSelector::VisitNode(Node* node) {
     case IrOpcode::kFloat32LessThanOrEqual:
       return VisitFloat32LessThanOrEqual(node);
     case IrOpcode::kFloat64Add:
-      return MarkAsFloat64(node), VisitFloat64Add(node);
+      return MarkAsDouble(node), VisitFloat64Add(node);
     case IrOpcode::kFloat64Sub:
-      return MarkAsFloat64(node), VisitFloat64Sub(node);
+      return MarkAsDouble(node), VisitFloat64Sub(node);
     case IrOpcode::kFloat64Mul:
-      return MarkAsFloat64(node), VisitFloat64Mul(node);
+      return MarkAsDouble(node), VisitFloat64Mul(node);
     case IrOpcode::kFloat64Div:
-      return MarkAsFloat64(node), VisitFloat64Div(node);
+      return MarkAsDouble(node), VisitFloat64Div(node);
     case IrOpcode::kFloat64Mod:
-      return MarkAsFloat64(node), VisitFloat64Mod(node);
+      return MarkAsDouble(node), VisitFloat64Mod(node);
     case IrOpcode::kFloat64Min:
-      return MarkAsFloat64(node), VisitFloat64Min(node);
+      return MarkAsDouble(node), VisitFloat64Min(node);
     case IrOpcode::kFloat64Max:
-      return MarkAsFloat64(node), VisitFloat64Max(node);
+      return MarkAsDouble(node), VisitFloat64Max(node);
     case IrOpcode::kFloat64Abs:
-      return MarkAsFloat64(node), VisitFloat64Abs(node);
+      return MarkAsDouble(node), VisitFloat64Abs(node);
     case IrOpcode::kFloat64Sqrt:
-      return MarkAsFloat64(node), VisitFloat64Sqrt(node);
+      return MarkAsDouble(node), VisitFloat64Sqrt(node);
     case IrOpcode::kFloat64Equal:
       return VisitFloat64Equal(node);
     case IrOpcode::kFloat64LessThan:
@@ -745,19 +798,19 @@ void InstructionSelector::VisitNode(Node* node) {
     case IrOpcode::kFloat64LessThanOrEqual:
       return VisitFloat64LessThanOrEqual(node);
     case IrOpcode::kFloat64RoundDown:
-      return MarkAsFloat64(node), VisitFloat64RoundDown(node);
+      return MarkAsDouble(node), VisitFloat64RoundDown(node);
     case IrOpcode::kFloat64RoundTruncate:
-      return MarkAsFloat64(node), VisitFloat64RoundTruncate(node);
+      return MarkAsDouble(node), VisitFloat64RoundTruncate(node);
     case IrOpcode::kFloat64RoundTiesAway:
-      return MarkAsFloat64(node), VisitFloat64RoundTiesAway(node);
+      return MarkAsDouble(node), VisitFloat64RoundTiesAway(node);
     case IrOpcode::kFloat64ExtractLowWord32:
-      return MarkAsWord32(node), VisitFloat64ExtractLowWord32(node);
+      return VisitFloat64ExtractLowWord32(node);
     case IrOpcode::kFloat64ExtractHighWord32:
-      return MarkAsWord32(node), VisitFloat64ExtractHighWord32(node);
+      return VisitFloat64ExtractHighWord32(node);
     case IrOpcode::kFloat64InsertLowWord32:
-      return MarkAsFloat64(node), VisitFloat64InsertLowWord32(node);
+      return MarkAsDouble(node), VisitFloat64InsertLowWord32(node);
     case IrOpcode::kFloat64InsertHighWord32:
-      return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node);
+      return MarkAsDouble(node), VisitFloat64InsertHighWord32(node);
     case IrOpcode::kLoadStackPointer:
       return VisitLoadStackPointer(node);
     case IrOpcode::kCheckedLoad: {
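
The instruction-selector.cc hunks above are the mechanical half of the revert: the fine-grained MarkAsWord32/MarkAsWord64/MarkAsFloat32/MarkAsFloat64 calls disappear and only doubles and references are tracked. A minimal sketch of that coarse classification (illustrative names, not the V8 API):

#include <iostream>

// Illustrative stand-ins for the representation constants used above.
enum Rep { kRepWord32, kRepWord64, kRepFloat32, kRepFloat64, kRepTagged };

enum class VRegClass { kNone, kDouble, kReference };

// Mirrors the restored MarkAsRepresentation switch: float32/float64 are
// recorded as "double", tagged values as "reference", integer widths are
// simply not tracked.
VRegClass Classify(Rep rep) {
  switch (rep) {
    case kRepFloat32:
    case kRepFloat64:
      return VRegClass::kDouble;
    case kRepTagged:
      return VRegClass::kReference;
    default:
      return VRegClass::kNone;
  }
}

int main() {
  std::cout << (Classify(kRepFloat32) == VRegClass::kDouble) << "\n";  // 1
  std::cout << (Classify(kRepWord64) == VRegClass::kNone) << "\n";     // 1
}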
src/compiler/instruction-selector.h
index b305408..6a7b962 100644
@@ -146,14 +146,21 @@ class InstructionSelector final {
   // will need to generate code for it.
   void MarkAsUsed(Node* node);
 
+  // Checks if {node} is marked as double.
+  bool IsDouble(const Node* node) const;
+
+  // Inform the register allocator of a double result.
+  void MarkAsDouble(Node* node);
+
+  // Checks if {node} is marked as reference.
+  bool IsReference(const Node* node) const;
+
+  // Inform the register allocator of a reference result.
+  void MarkAsReference(Node* node);
+
   // Inform the register allocation of the representation of the value produced
   // by {node}.
   void MarkAsRepresentation(MachineType rep, Node* node);
-  void MarkAsWord32(Node* node) { MarkAsRepresentation(kRepWord32, node); }
-  void MarkAsWord64(Node* node) { MarkAsRepresentation(kRepWord64, node); }
-  void MarkAsFloat32(Node* node) { MarkAsRepresentation(kRepFloat32, node); }
-  void MarkAsFloat64(Node* node) { MarkAsRepresentation(kRepFloat64, node); }
-  void MarkAsReference(Node* node) { MarkAsRepresentation(kRepTagged, node); }
 
   // Inform the register allocation of the representation of the unallocated
   // operand {op}.
src/compiler/instruction.cc
index ed66db2..df3d345 100644
@@ -53,48 +53,22 @@ std::ostream& operator<<(std::ostream& os,
           return os << "[immediate:" << imm.indexed_value() << "]";
       }
     }
-    case InstructionOperand::ALLOCATED: {
-      auto allocated = AllocatedOperand::cast(op);
-      switch (allocated.allocated_kind()) {
+    case InstructionOperand::ALLOCATED:
+      switch (AllocatedOperand::cast(op).allocated_kind()) {
         case AllocatedOperand::STACK_SLOT:
-          os << "[stack:" << StackSlotOperand::cast(op).index();
-          break;
+          return os << "[stack:" << StackSlotOperand::cast(op).index() << "]";
         case AllocatedOperand::DOUBLE_STACK_SLOT:
-          os << "[double_stack:" << DoubleStackSlotOperand::cast(op).index();
-          break;
+          return os << "[double_stack:"
+                    << DoubleStackSlotOperand::cast(op).index() << "]";
         case AllocatedOperand::REGISTER:
-          os << "["
-             << conf->general_register_name(RegisterOperand::cast(op).index())
-             << "|R";
-          break;
+          return os << "["
+                    << conf->general_register_name(
+                           RegisterOperand::cast(op).index()) << "|R]";
         case AllocatedOperand::DOUBLE_REGISTER:
-          os << "["
-             << conf->double_register_name(
-                    DoubleRegisterOperand::cast(op).index()) << "|R";
-          break;
+          return os << "["
+                    << conf->double_register_name(
+                           DoubleRegisterOperand::cast(op).index()) << "|R]";
       }
-      switch (allocated.machine_type()) {
-        case kRepWord32:
-          os << "|w32";
-          break;
-        case kRepWord64:
-          os << "|w64";
-          break;
-        case kRepFloat32:
-          os << "|f32";
-          break;
-        case kRepFloat64:
-          os << "|f64";
-          break;
-        case kRepTagged:
-          os << "|t";
-          break;
-        default:
-          os << "|?";
-          break;
-      }
-      return os << "]";
-    }
     case InstructionOperand::INVALID:
       return os << "(x)";
   }
@@ -109,7 +83,7 @@ std::ostream& operator<<(std::ostream& os,
   PrintableInstructionOperand printable_op = {printable.register_configuration_,
                                               mo.destination()};
   os << printable_op;
-  if (!mo.source().Equals(mo.destination())) {
+  if (mo.source() != mo.destination()) {
     printable_op.op_ = mo.source();
     os << " = " << printable_op;
   }
@@ -130,11 +104,11 @@ MoveOperands* ParallelMove::PrepareInsertAfter(MoveOperands* move) const {
   MoveOperands* to_eliminate = nullptr;
   for (auto curr : *this) {
     if (curr->IsEliminated()) continue;
-    if (curr->destination().EqualsModuloType(move->source())) {
+    if (curr->destination() == move->source()) {
       DCHECK(!replacement);
       replacement = curr;
       if (to_eliminate != nullptr) break;
-    } else if (curr->destination().EqualsModuloType(move->destination())) {
+    } else if (curr->destination() == move->destination()) {
       DCHECK(!to_eliminate);
       to_eliminate = curr;
       if (replacement != nullptr) break;
@@ -505,7 +479,8 @@ InstructionSequence::InstructionSequence(Isolate* isolate,
       instructions_(zone()),
       next_virtual_register_(0),
       reference_maps_(zone()),
-      representations_(zone()),
+      doubles_(std::less<int>(), VirtualRegisterSet::allocator_type(zone())),
+      references_(std::less<int>(), VirtualRegisterSet::allocator_type(zone())),
       deoptimization_entries_(zone()) {
   block_starts_.reserve(instruction_blocks_->size());
 }
@@ -573,48 +548,23 @@ const InstructionBlock* InstructionSequence::GetInstructionBlock(
 }
 
 
-static MachineType FilterRepresentation(MachineType rep) {
-  DCHECK_EQ(rep, RepresentationOf(rep));
-  switch (rep) {
-    case kRepBit:
-    case kRepWord8:
-    case kRepWord16:
-      return InstructionSequence::DefaultRepresentation();
-    case kRepWord32:
-    case kRepWord64:
-    case kRepFloat32:
-    case kRepFloat64:
-    case kRepTagged:
-      return rep;
-    default:
-      break;
-  }
-  UNREACHABLE();
-  return kMachNone;
+bool InstructionSequence::IsReference(int virtual_register) const {
+  return references_.find(virtual_register) != references_.end();
 }
 
 
-MachineType InstructionSequence::GetRepresentation(int virtual_register) const {
-  DCHECK_LE(0, virtual_register);
-  DCHECK_LT(virtual_register, VirtualRegisterCount());
-  if (virtual_register >= static_cast<int>(representations_.size())) {
-    return DefaultRepresentation();
-  }
-  return representations_[virtual_register];
+bool InstructionSequence::IsDouble(int virtual_register) const {
+  return doubles_.find(virtual_register) != doubles_.end();
 }
 
 
-void InstructionSequence::MarkAsRepresentation(MachineType machine_type,
-                                               int virtual_register) {
-  DCHECK_LE(0, virtual_register);
-  DCHECK_LT(virtual_register, VirtualRegisterCount());
-  if (virtual_register >= static_cast<int>(representations_.size())) {
-    representations_.resize(VirtualRegisterCount(), DefaultRepresentation());
-  }
-  machine_type = FilterRepresentation(machine_type);
-  DCHECK_IMPLIES(representations_[virtual_register] != machine_type,
-                 representations_[virtual_register] == DefaultRepresentation());
-  representations_[virtual_register] = machine_type;
+void InstructionSequence::MarkAsReference(int virtual_register) {
+  references_.insert(virtual_register);
+}
+
+
+void InstructionSequence::MarkAsDouble(int virtual_register) {
+  doubles_.insert(virtual_register);
 }
 
 
src/compiler/instruction.h
index 7d5105b..9b97282 100644
@@ -50,6 +50,19 @@ class InstructionOperand {
   inline bool IsStackSlot() const;
   inline bool IsDoubleStackSlot() const;
 
+  // Useful for map/set keys.
+  bool operator<(const InstructionOperand& op) const {
+    return value_ < op.value_;
+  }
+
+  bool operator==(const InstructionOperand& op) const {
+    return value_ == op.value_;
+  }
+
+  bool operator!=(const InstructionOperand& op) const {
+    return value_ != op.value_;
+  }
+
   template <typename SubKindOperand>
   static SubKindOperand* New(Zone* zone, const SubKindOperand& op) {
     void* buffer = zone->New(sizeof(op));
@@ -61,43 +74,22 @@ class InstructionOperand {
     *dest = *src;
   }
 
-  bool Equals(const InstructionOperand& that) const {
-    return this->value_ == that.value_;
-  }
-
-  bool Compare(const InstructionOperand& that) const {
-    return this->value_ < that.value_;
-  }
-
-  bool EqualsModuloType(const InstructionOperand& that) const {
-    return this->GetValueModuloType() == that.GetValueModuloType();
-  }
-
-  bool CompareModuloType(const InstructionOperand& that) const {
-    return this->GetValueModuloType() < that.GetValueModuloType();
-  }
-
  protected:
   explicit InstructionOperand(Kind kind) : value_(KindField::encode(kind)) {}
 
-  inline uint64_t GetValueModuloType() const;
-
   class KindField : public BitField64<Kind, 0, 3> {};
 
   uint64_t value_;
 };
 
-
 struct PrintableInstructionOperand {
   const RegisterConfiguration* register_configuration_;
   InstructionOperand op_;
 };
 
-
 std::ostream& operator<<(std::ostream& os,
                          const PrintableInstructionOperand& op);
 
-
 #define INSTRUCTION_OPERAND_CASTS(OperandType, OperandKind)      \
                                                                  \
   static OperandType* cast(InstructionOperand* op) {             \
@@ -354,8 +346,6 @@ class ImmediateOperand : public InstructionOperand {
 
 class AllocatedOperand : public InstructionOperand {
  public:
-  // TODO(dcarney): machine_type makes this now redundant. Just need to know is
-  // the operand is a slot or a register.
   enum AllocatedKind {
     STACK_SLOT,
     DOUBLE_STACK_SLOT,
@@ -363,12 +353,10 @@ class AllocatedOperand : public InstructionOperand {
     DOUBLE_REGISTER
   };
 
-  AllocatedOperand(AllocatedKind kind, MachineType machine_type, int index)
+  AllocatedOperand(AllocatedKind kind, int index)
       : InstructionOperand(ALLOCATED) {
     DCHECK_IMPLIES(kind == REGISTER || kind == DOUBLE_REGISTER, index >= 0);
-    DCHECK(IsSupportedMachineType(machine_type));
     value_ |= AllocatedKindField::encode(kind);
-    value_ |= MachineTypeField::encode(machine_type);
     value_ |= static_cast<int64_t>(index) << IndexField::kShift;
   }
 
@@ -380,33 +368,14 @@ class AllocatedOperand : public InstructionOperand {
     return AllocatedKindField::decode(value_);
   }
 
-  MachineType machine_type() const { return MachineTypeField::decode(value_); }
-
-  static AllocatedOperand* New(Zone* zone, AllocatedKind kind,
-                               MachineType machine_type, int index) {
-    return InstructionOperand::New(zone,
-                                   AllocatedOperand(kind, machine_type, index));
-  }
-
-  static bool IsSupportedMachineType(MachineType machine_type) {
-    if (RepresentationOf(machine_type) != machine_type) return false;
-    switch (machine_type) {
-      case kRepWord32:
-      case kRepWord64:
-      case kRepFloat32:
-      case kRepFloat64:
-      case kRepTagged:
-        return true;
-      default:
-        return false;
-    }
+  static AllocatedOperand* New(Zone* zone, AllocatedKind kind, int index) {
+    return InstructionOperand::New(zone, AllocatedOperand(kind, index));
   }
 
   INSTRUCTION_OPERAND_CASTS(AllocatedOperand, ALLOCATED);
 
   STATIC_ASSERT(KindField::kSize == 3);
   class AllocatedKindField : public BitField64<AllocatedKind, 3, 2> {};
-  class MachineTypeField : public BitField64<MachineType, 5, 16> {};
   class IndexField : public BitField64<int32_t, 35, 29> {};
 };
 
@@ -431,17 +400,14 @@ ALLOCATED_OPERAND_LIST(ALLOCATED_OPERAND_IS)
 #undef ALLOCATED_OPERAND_IS
 
 
-// TODO(dcarney): these subkinds are now pretty useless, nuke.
 #define ALLOCATED_OPERAND_CLASS(SubKind, kOperandKind)                       \
   class SubKind##Operand final : public AllocatedOperand {                   \
    public:                                                                   \
-    explicit SubKind##Operand(MachineType machine_type, int index)           \
-        : AllocatedOperand(kOperandKind, machine_type, index) {}             \
+    explicit SubKind##Operand(int index)                                     \
+        : AllocatedOperand(kOperandKind, index) {}                           \
                                                                              \
-    static SubKind##Operand* New(Zone* zone, MachineType machine_type,       \
-                                 int index) {                                \
-      return InstructionOperand::New(zone,                                   \
-                                     SubKind##Operand(machine_type, index)); \
+    static SubKind##Operand* New(Zone* zone, int index) {                    \
+      return InstructionOperand::New(zone, SubKind##Operand(index));         \
     }                                                                        \
                                                                              \
     static SubKind##Operand* cast(InstructionOperand* op) {                  \
@@ -463,24 +429,6 @@ ALLOCATED_OPERAND_LIST(ALLOCATED_OPERAND_CLASS)
 #undef ALLOCATED_OPERAND_CLASS
 
 
-uint64_t InstructionOperand::GetValueModuloType() const {
-  if (IsAllocated()) {
-    // TODO(dcarney): put machine type last and mask.
-    return AllocatedOperand::MachineTypeField::update(this->value_, kMachNone);
-  }
-  return this->value_;
-}
-
-
-// Required for maps that don't care about machine type.
-struct CompareOperandModuloType {
-  bool operator()(const InstructionOperand& a,
-                  const InstructionOperand& b) const {
-    return a.CompareModuloType(b);
-  }
-};
-
-
 class MoveOperands final : public ZoneObject {
  public:
   MoveOperands(const InstructionOperand& source,
@@ -508,14 +456,14 @@ class MoveOperands final : public ZoneObject {
 
   // True if this move a move into the given destination operand.
   bool Blocks(const InstructionOperand& operand) const {
-    return !IsEliminated() && source().EqualsModuloType(operand);
+    return !IsEliminated() && source() == operand;
   }
 
   // A move is redundant if it's been eliminated or if its source and
   // destination are the same.
   bool IsRedundant() const {
     DCHECK_IMPLIES(!destination_.IsInvalid(), !destination_.IsConstant());
-    return IsEliminated() || source_.EqualsModuloType(destination_);
+    return IsEliminated() || source_ == destination_;
   }
 
   // We clear both operands to indicate move that's been eliminated.
@@ -603,7 +551,7 @@ class ReferenceMap final : public ZoneObject {
 
 std::ostream& operator<<(std::ostream& os, const ReferenceMap& pm);
 
-class Instruction final {
+class Instruction {
  public:
   size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
   const InstructionOperand* OutputAt(size_t i) const {
@@ -728,9 +676,10 @@ class Instruction final {
   ParallelMove* const* parallel_moves() const { return &parallel_moves_[0]; }
   ParallelMove** parallel_moves() { return &parallel_moves_[0]; }
 
- private:
+ protected:
   explicit Instruction(InstructionCode opcode);
 
+ private:
   Instruction(InstructionCode opcode, size_t output_count,
               InstructionOperand* outputs, size_t input_count,
               InstructionOperand* inputs, size_t temp_count,
@@ -747,6 +696,7 @@ class Instruction final {
   ReferenceMap* reference_map_;
   InstructionOperand operands_[1];
 
+ private:
   DISALLOW_COPY_AND_ASSIGN(Instruction);
 };
 
@@ -1054,24 +1004,11 @@ class InstructionSequence final : public ZoneObject {
 
   const InstructionBlock* GetInstructionBlock(int instruction_index) const;
 
-  static MachineType DefaultRepresentation() {
-    return kPointerSize == 8 ? kRepWord64 : kRepWord32;
-  }
-  MachineType GetRepresentation(int virtual_register) const;
-  void MarkAsRepresentation(MachineType machine_type, int virtual_register);
+  bool IsReference(int virtual_register) const;
+  bool IsDouble(int virtual_register) const;
 
-  bool IsReference(int virtual_register) const {
-    return GetRepresentation(virtual_register) == kRepTagged;
-  }
-  bool IsFloat(int virtual_register) const {
-    switch (GetRepresentation(virtual_register)) {
-      case kRepFloat32:
-      case kRepFloat64:
-        return true;
-      default:
-        return false;
-    }
-  }
+  void MarkAsReference(int virtual_register);
+  void MarkAsDouble(int virtual_register);
 
   Instruction* GetBlockStart(RpoNumber rpo) const;
 
@@ -1174,7 +1111,8 @@ class InstructionSequence final : public ZoneObject {
   InstructionDeque instructions_;
   int next_virtual_register_;
   ReferenceMapDeque reference_maps_;
-  ZoneVector<MachineType> representations_;
+  VirtualRegisterSet doubles_;
+  VirtualRegisterSet references_;
   DeoptimizationVector deoptimization_entries_;
 
   DISALLOW_COPY_AND_ASSIGN(InstructionSequence);
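
For reference, the operand encoding being stripped out of instruction.h above packs everything into a single 64-bit value. The sketch below reproduces that packing with plain shifts (bit positions are taken from the BitField64 declarations in the diff; the enum values passed in main are made up), including the EqualsModuloType comparison that ignores the machine-type bits.

#include <cstdint>
#include <iostream>

// Bit layout from the reverted instruction.h: operand kind in bits 0..2,
// allocated kind in bits 3..4, machine type in bits 5..20, index from bit 35
// up. Plain shifts stand in for V8's BitField64 helpers.
constexpr uint64_t kKindBits = 3, kKindShift = 0;
constexpr uint64_t kAllocKindBits = 2, kAllocKindShift = 3;
constexpr uint64_t kMachineTypeBits = 16, kMachineTypeShift = 5;
constexpr uint64_t kIndexShift = 35;

constexpr uint64_t Mask(uint64_t bits) { return (uint64_t{1} << bits) - 1; }

uint64_t Encode(uint64_t kind, uint64_t alloc_kind, uint64_t machine_type,
                uint64_t index) {
  return ((kind & Mask(kKindBits)) << kKindShift) |
         ((alloc_kind & Mask(kAllocKindBits)) << kAllocKindShift) |
         ((machine_type & Mask(kMachineTypeBits)) << kMachineTypeShift) |
         (index << kIndexShift);
}

// Counterpart of the removed EqualsModuloType: zero the machine-type bits of
// both operands before comparing.
bool EqualsModuloType(uint64_t a, uint64_t b) {
  const uint64_t mt = Mask(kMachineTypeBits) << kMachineTypeShift;
  return (a & ~mt) == (b & ~mt);
}

int main() {
  // Two stack slots with the same index but different machine types
  // (all numeric enum values here are made up for illustration).
  uint64_t float64_slot = Encode(/*kind=*/4, /*alloc_kind=*/0, /*type=*/9, /*index=*/2);
  uint64_t word32_slot  = Encode(/*kind=*/4, /*alloc_kind=*/0, /*type=*/5, /*index=*/2);
  std::cout << (float64_slot == word32_slot)               // 0: raw values differ
            << EqualsModuloType(float64_slot, word32_slot)  // 1: equal modulo type
            << "\n";
}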
src/compiler/move-optimizer.cc
index d55005f..ea514aa 100644
@@ -11,18 +11,8 @@ namespace compiler {
 namespace {
 
 typedef std::pair<InstructionOperand, InstructionOperand> MoveKey;
-
-struct MoveKeyCompare {
-  bool operator()(const MoveKey& a, const MoveKey& b) const {
-    if (a.first.EqualsModuloType(b.first)) {
-      return a.second.CompareModuloType(b.second);
-    }
-    return a.first.CompareModuloType(b.first);
-  }
-};
-
-typedef ZoneMap<MoveKey, unsigned, MoveKeyCompare> MoveMap;
-typedef ZoneSet<InstructionOperand, CompareOperandModuloType> OperandSet;
+typedef ZoneMap<MoveKey, unsigned> MoveMap;
+typedef ZoneSet<InstructionOperand> OperandSet;
 
 
 bool GapsCanMoveOver(Instruction* instr) { return instr->IsNop(); }
@@ -234,12 +224,10 @@ bool IsSlot(const InstructionOperand& op) {
 
 
 bool LoadCompare(const MoveOperands* a, const MoveOperands* b) {
-  if (!a->source().EqualsModuloType(b->source())) {
-    return a->source().CompareModuloType(b->source());
-  }
+  if (a->source() != b->source()) return a->source() < b->source();
   if (IsSlot(a->destination()) && !IsSlot(b->destination())) return false;
   if (!IsSlot(a->destination()) && IsSlot(b->destination())) return true;
-  return a->destination().CompareModuloType(b->destination());
+  return a->destination() < b->destination();
 }
 
 }  // namespace
@@ -264,8 +252,7 @@ void MoveOptimizer::FinalizeMoves(Instruction* instr) {
   MoveOperands* group_begin = nullptr;
   for (auto load : loads) {
     // New group.
-    if (group_begin == nullptr ||
-        !load->source().EqualsModuloType(group_begin->source())) {
+    if (group_begin == nullptr || load->source() != group_begin->source()) {
       group_begin = load;
       continue;
     }
src/compiler/register-allocator-verifier.cc
index f23d244..2a749dd 100644
@@ -163,7 +163,7 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
           CHECK(false);
           break;
         case UnallocatedOperand::NONE:
-          if (sequence()->IsFloat(vreg)) {
+          if (sequence()->IsDouble(vreg)) {
             constraint->type_ = kNoneDouble;
           } else {
             constraint->type_ = kNone;
@@ -178,14 +178,14 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
           constraint->value_ = unallocated->fixed_register_index();
           break;
         case UnallocatedOperand::MUST_HAVE_REGISTER:
-          if (sequence()->IsFloat(vreg)) {
+          if (sequence()->IsDouble(vreg)) {
             constraint->type_ = kDoubleRegister;
           } else {
             constraint->type_ = kRegister;
           }
           break;
         case UnallocatedOperand::MUST_HAVE_SLOT:
-          if (sequence()->IsFloat(vreg)) {
+          if (sequence()->IsDouble(vreg)) {
             constraint->type_ = kDoubleSlot;
           } else {
             constraint->type_ = kSlot;
@@ -286,7 +286,7 @@ class PhiMap : public ZoneMap<int, PhiData*>, public ZoneObject {
 struct OperandLess {
   bool operator()(const InstructionOperand* a,
                   const InstructionOperand* b) const {
-    return a->CompareModuloType(*b);
+    return *a < *b;
   }
 };
 
@@ -320,7 +320,7 @@ class OperandMap : public ZoneObject {
           this->erase(it++);
           if (it == this->end()) return;
         }
-        if (it->first->EqualsModuloType(*o.first)) {
+        if (*it->first == *o.first) {
           ++it;
           if (it == this->end()) return;
         } else {
@@ -372,14 +372,13 @@ class OperandMap : public ZoneObject {
   }
 
   void DropRegisters(const RegisterConfiguration* config) {
-    // TODO(dcarney): sort map by kind and drop range.
-    for (auto it = map().begin(); it != map().end();) {
-      auto op = it->first;
-      if (op->IsRegister() || op->IsDoubleRegister()) {
-        map().erase(it++);
-      } else {
-        ++it;
-      }
+    for (int i = 0; i < config->num_general_registers(); ++i) {
+      RegisterOperand op(i);
+      Drop(&op);
+    }
+    for (int i = 0; i < config->num_double_registers(); ++i) {
+      DoubleRegisterOperand op(i);
+      Drop(&op);
     }
   }
 
src/compiler/register-allocator.cc
index 9badf79..49229d5 100644
@@ -89,27 +89,6 @@ bool IsOutputDoubleRegisterOf(Instruction* instr, int index) {
   return false;
 }
 
-
-// TODO(dcarney): fix frame to allow frame accesses to half size location.
-int GetByteWidth(MachineType machine_type) {
-  DCHECK_EQ(RepresentationOf(machine_type), machine_type);
-  switch (machine_type) {
-    case kRepBit:
-    case kRepWord8:
-    case kRepWord16:
-    case kRepWord32:
-    case kRepTagged:
-      return kPointerSize;
-    case kRepFloat32:
-    case kRepWord64:
-    case kRepFloat64:
-      return 8;
-    default:
-      UNREACHABLE();
-      return 0;
-  }
-}
-
 }  // namespace
 
 
@@ -235,7 +214,7 @@ struct LiveRange::SpillAtDefinitionList : ZoneObject {
 };
 
 
-LiveRange::LiveRange(int id, MachineType machine_type)
+LiveRange::LiveRange(int id)
     : id_(id),
       spill_start_index_(kMaxInt),
       bits_(0),
@@ -249,10 +228,8 @@ LiveRange::LiveRange(int id, MachineType machine_type)
       current_interval_(nullptr),
       last_processed_use_(nullptr),
       current_hint_position_(nullptr) {
-  DCHECK(AllocatedOperand::IsSupportedMachineType(machine_type));
   bits_ = SpillTypeField::encode(SpillType::kNoSpillType) |
-          AssignedRegisterField::encode(kUnassignedRegister) |
-          MachineTypeField::encode(machine_type);
+          AssignedRegisterField::encode(kUnassignedRegister);
 }
 
 
@@ -291,18 +268,6 @@ void LiveRange::Spill() {
 }
 
 
-RegisterKind LiveRange::kind() const {
-  switch (RepresentationOf(machine_type())) {
-    case kRepFloat32:
-    case kRepFloat64:
-      return DOUBLE_REGISTERS;
-    default:
-      break;
-  }
-  return GENERAL_REGISTERS;
-}
-
-
 void LiveRange::SpillAtDefinition(Zone* zone, int gap_index,
                                   InstructionOperand* operand) {
   DCHECK(HasNoSpillType());
@@ -312,9 +277,9 @@ void LiveRange::SpillAtDefinition(Zone* zone, int gap_index,
 
 
 void LiveRange::CommitSpillsAtDefinition(InstructionSequence* sequence,
-                                         const InstructionOperand& op,
+                                         InstructionOperand* op,
                                          bool might_be_duplicated) {
-  DCHECK_IMPLIES(op.IsConstant(), spills_at_definition_ == nullptr);
+  DCHECK_IMPLIES(op->IsConstant(), spills_at_definition_ == nullptr);
   DCHECK(!IsChild());
   auto zone = sequence->zone();
   for (auto to_spill = spills_at_definition_; to_spill != nullptr;
@@ -327,15 +292,15 @@ void LiveRange::CommitSpillsAtDefinition(InstructionSequence* sequence,
       bool found = false;
       for (auto move_op : *move) {
         if (move_op->IsEliminated()) continue;
-        if (move_op->source().Equals(*to_spill->operand) &&
-            move_op->destination().Equals(op)) {
+        if (move_op->source() == *to_spill->operand &&
+            move_op->destination() == *op) {
           found = true;
           break;
         }
       }
       if (found) continue;
     }
-    move->AddMove(*to_spill->operand, op);
+    move->AddMove(*to_spill->operand, *op);
   }
 }
 
@@ -364,6 +329,14 @@ void LiveRange::SetSpillRange(SpillRange* spill_range) {
 }
 
 
+void LiveRange::CommitSpillOperand(AllocatedOperand* operand) {
+  DCHECK(HasSpillRange());
+  DCHECK(!IsChild());
+  set_spill_type(SpillType::kSpillOperand);
+  spill_operand_ = operand;
+}
+
+
 UsePosition* LiveRange::NextUsePosition(LifetimePosition start) const {
   UsePosition* use_pos = last_processed_use_;
   if (use_pos == nullptr || use_pos->pos() > start) {
@@ -422,33 +395,18 @@ InstructionOperand LiveRange::GetAssignedOperand() const {
     DCHECK(!spilled());
     switch (kind()) {
       case GENERAL_REGISTERS:
-        return RegisterOperand(machine_type(), assigned_register());
+        return RegisterOperand(assigned_register());
       case DOUBLE_REGISTERS:
-        return DoubleRegisterOperand(machine_type(), assigned_register());
+        return DoubleRegisterOperand(assigned_register());
+      default:
+        UNREACHABLE();
     }
   }
   DCHECK(spilled());
   DCHECK(!HasRegisterAssigned());
-  if (TopLevel()->HasSpillOperand()) {
-    auto op = TopLevel()->GetSpillOperand();
-    DCHECK(!op->IsUnallocated());
-    return *op;
-  }
-  return TopLevel()->GetSpillRangeOperand();
-}
-
-
-AllocatedOperand LiveRange::GetSpillRangeOperand() const {
-  auto spill_range = GetSpillRange();
-  int index = spill_range->assigned_slot();
-  switch (kind()) {
-    case GENERAL_REGISTERS:
-      return StackSlotOperand(machine_type(), index);
-    case DOUBLE_REGISTERS:
-      return DoubleStackSlotOperand(machine_type(), index);
-  }
-  UNREACHABLE();
-  return StackSlotOperand(kMachNone, 0);
+  auto op = TopLevel()->GetSpillOperand();
+  DCHECK(!op->IsUnallocated());
+  return *op;
 }
 
 
@@ -554,6 +512,7 @@ void LiveRange::SplitAt(LifetimePosition position, LiveRange* result,
   // Link the new live range in the chain before any of the other
   // ranges linked from the range before the split.
   result->parent_ = (parent_ == nullptr) ? this : parent_;
+  result->set_kind(result->parent_->kind());
   result->next_ = next_;
   next_ = result;
 
@@ -667,14 +626,15 @@ void LiveRange::AddUsePosition(UsePosition* use_pos) {
 
 
 void LiveRange::ConvertUsesToOperand(const InstructionOperand& op,
-                                     const InstructionOperand& spill_op) {
+                                     InstructionOperand* spill_op) {
   for (auto pos = first_pos(); pos != nullptr; pos = pos->next()) {
     DCHECK(Start() <= pos->pos() && pos->pos() <= End());
     if (!pos->HasOperand()) continue;
     switch (pos->type()) {
       case UsePositionType::kRequiresSlot:
-        DCHECK(spill_op.IsStackSlot() || spill_op.IsDoubleStackSlot());
-        InstructionOperand::ReplaceWith(pos->operand(), &spill_op);
+        if (spill_op != nullptr) {
+          InstructionOperand::ReplaceWith(pos->operand(), spill_op);
+        }
         break;
       case UsePositionType::kRequiresRegister:
         DCHECK(op.IsRegister() || op.IsDoubleRegister());
@@ -766,8 +726,7 @@ static bool AreUseIntervalsIntersecting(UseInterval* interval1,
 }
 
 
-SpillRange::SpillRange(LiveRange* parent, Zone* zone)
-    : live_ranges_(zone), assigned_slot_(kUnassignedSlot) {
+SpillRange::SpillRange(LiveRange* parent, Zone* zone) : live_ranges_(zone) {
   DCHECK(!parent->IsChild());
   UseInterval* result = nullptr;
   UseInterval* node = nullptr;
@@ -793,11 +752,6 @@ SpillRange::SpillRange(LiveRange* parent, Zone* zone)
 }
 
 
-int SpillRange::ByteWidth() const {
-  return GetByteWidth(live_ranges_[0]->machine_type());
-}
-
-
 bool SpillRange::IsIntersectingWith(SpillRange* other) const {
   if (this->use_interval_ == nullptr || other->use_interval_ == nullptr ||
       this->End() <= other->use_interval_->start() ||
@@ -809,11 +763,7 @@ bool SpillRange::IsIntersectingWith(SpillRange* other) const {
 
 
 bool SpillRange::TryMerge(SpillRange* other) {
-  // TODO(dcarney): byte widths should be compared here not kinds.
-  if (live_ranges_[0]->kind() != other->live_ranges_[0]->kind() ||
-      IsIntersectingWith(other)) {
-    return false;
-  }
+  if (kind() != other->kind() || IsIntersectingWith(other)) return false;
 
   auto max = LifetimePosition::MaxPosition();
   if (End() < other->End() && other->End() != max) {
@@ -837,6 +787,14 @@ bool SpillRange::TryMerge(SpillRange* other) {
 }
 
 
+void SpillRange::SetOperand(AllocatedOperand* op) {
+  for (auto range : live_ranges()) {
+    DCHECK(range->GetSpillRange() == this);
+    range->CommitSpillOperand(op);
+  }
+}
+
+
 void SpillRange::MergeDisjointIntervals(UseInterval* other) {
   UseInterval* tail = nullptr;
   auto current = use_interval_;
@@ -903,8 +861,7 @@ RegisterAllocationData::RegisterAllocationData(
                                 allocation_zone()),
       spill_ranges_(allocation_zone()),
       assigned_registers_(nullptr),
-      assigned_double_registers_(nullptr),
-      virtual_register_count_(code->VirtualRegisterCount()) {
+      assigned_double_registers_(nullptr) {
   DCHECK(this->config()->num_general_registers() <=
          RegisterConfiguration::kMaxGeneralRegisters);
   DCHECK(this->config()->num_double_registers() <=
@@ -919,49 +876,30 @@ RegisterAllocationData::RegisterAllocationData(
 }
 
 
-MoveOperands* RegisterAllocationData::AddGapMove(
-    int index, Instruction::GapPosition position,
-    const InstructionOperand& from, const InstructionOperand& to) {
-  auto instr = code()->InstructionAt(index);
-  auto moves = instr->GetOrCreateParallelMove(position, code_zone());
-  return moves->AddMove(from, to);
-}
-
-
-MachineType RegisterAllocationData::MachineTypeFor(int virtual_register) {
-  DCHECK_LT(virtual_register, code()->VirtualRegisterCount());
-  return code()->GetRepresentation(virtual_register);
-}
-
-
 LiveRange* RegisterAllocationData::LiveRangeFor(int index) {
   if (index >= static_cast<int>(live_ranges().size())) {
     live_ranges().resize(index + 1, nullptr);
   }
   auto result = live_ranges()[index];
   if (result == nullptr) {
-    result = NewLiveRange(index, MachineTypeFor(index));
+    result = NewLiveRange(index);
     live_ranges()[index] = result;
   }
   return result;
 }
 
 
-LiveRange* RegisterAllocationData::NewLiveRange(int index,
-                                                MachineType machine_type) {
-  return new (allocation_zone()) LiveRange(index, machine_type);
+MoveOperands* RegisterAllocationData::AddGapMove(
+    int index, Instruction::GapPosition position,
+    const InstructionOperand& from, const InstructionOperand& to) {
+  auto instr = code()->InstructionAt(index);
+  auto moves = instr->GetOrCreateParallelMove(position, code_zone());
+  return moves->AddMove(from, to);
 }
 
 
-LiveRange* RegisterAllocationData::NewChildRangeFor(LiveRange* range) {
-  int vreg = virtual_register_count_++;
-  if (vreg >= static_cast<int>(live_ranges().size())) {
-    live_ranges().resize(vreg + 1, nullptr);
-  }
-  auto child = new (allocation_zone()) LiveRange(vreg, range->machine_type());
-  DCHECK_NULL(live_ranges()[vreg]);
-  live_ranges()[vreg] = child;
-  return child;
+LiveRange* RegisterAllocationData::NewLiveRange(int index) {
+  return new (allocation_zone()) LiveRange(index);
 }
 
 
@@ -1034,21 +972,15 @@ InstructionOperand* ConstraintBuilder::AllocateFixed(
   TRACE("Allocating fixed reg for op %d\n", operand->virtual_register());
   DCHECK(operand->HasFixedPolicy());
   InstructionOperand allocated;
-  MachineType machine_type = InstructionSequence::DefaultRepresentation();
-  int virtual_register = operand->virtual_register();
-  if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
-    machine_type = data()->MachineTypeFor(virtual_register);
-  }
   if (operand->HasFixedSlotPolicy()) {
-    allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT, machine_type,
+    allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT,
                                  operand->fixed_slot_index());
   } else if (operand->HasFixedRegisterPolicy()) {
-    allocated = AllocatedOperand(AllocatedOperand::REGISTER, machine_type,
+    allocated = AllocatedOperand(AllocatedOperand::REGISTER,
                                  operand->fixed_register_index());
   } else if (operand->HasFixedDoubleRegisterPolicy()) {
-    DCHECK_NE(InstructionOperand::kInvalidVirtualRegister, virtual_register);
     allocated = AllocatedOperand(AllocatedOperand::DOUBLE_REGISTER,
-                                 machine_type, operand->fixed_register_index());
+                                 operand->fixed_register_index());
   } else {
     UNREACHABLE();
   }
@@ -1316,9 +1248,9 @@ LiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) {
   DCHECK(index < config()->num_general_registers());
   auto result = data()->fixed_live_ranges()[index];
   if (result == nullptr) {
-    result = data()->NewLiveRange(FixedLiveRangeID(index),
-                                  InstructionSequence::DefaultRepresentation());
+    result = data()->NewLiveRange(FixedLiveRangeID(index));
     DCHECK(result->IsFixed());
+    result->set_kind(GENERAL_REGISTERS);
     result->set_assigned_register(index);
     data()->MarkAllocated(GENERAL_REGISTERS, index);
     data()->fixed_live_ranges()[index] = result;
@@ -1331,8 +1263,9 @@ LiveRange* LiveRangeBuilder::FixedDoubleLiveRangeFor(int index) {
   DCHECK(index < config()->num_aliased_double_registers());
   auto result = data()->fixed_double_live_ranges()[index];
   if (result == nullptr) {
-    result = data()->NewLiveRange(FixedDoubleLiveRangeID(index), kRepFloat64);
+    result = data()->NewLiveRange(FixedDoubleLiveRangeID(index));
     DCHECK(result->IsFixed());
+    result->set_kind(DOUBLE_REGISTERS);
     result->set_assigned_register(index);
     data()->MarkAllocated(DOUBLE_REGISTERS, index);
     data()->fixed_double_live_ranges()[index] = result;
@@ -1632,6 +1565,7 @@ void LiveRangeBuilder::BuildLiveRanges() {
   // Postprocess the ranges.
   for (auto range : data()->live_ranges()) {
     if (range == nullptr) continue;
+    range->set_kind(RequiredRegisterKind(range->id()));
     // Give slots to all ranges with a non fixed slot use.
     if (range->has_slot_use() && range->HasNoSpillType()) {
       data()->AssignSpillRangeToLiveRange(range);
@@ -1676,6 +1610,13 @@ void LiveRangeBuilder::ResolvePhiHint(InstructionOperand* operand,
 }
 
 
+RegisterKind LiveRangeBuilder::RequiredRegisterKind(
+    int virtual_register) const {
+  return (code()->IsDouble(virtual_register)) ? DOUBLE_REGISTERS
+                                              : GENERAL_REGISTERS;
+}
+
+
 void LiveRangeBuilder::Verify() const {
   for (auto& hint : phi_hints_) {
     CHECK(hint.second->IsResolved());
@@ -1706,7 +1647,8 @@ LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
          (GetInstructionBlock(code(), pos)->last_instruction_index() !=
           pos.ToInstructionIndex()));
 
-  auto result = data()->NewChildRangeFor(range);
+  int vreg = code()->NextVirtualRegister();
+  auto result = LiveRangeFor(vreg);
   range->SplitAt(pos, result, allocation_zone());
   return result;
 }
@@ -2710,9 +2652,13 @@ void OperandAssigner::AssignSpillSlots() {
   for (auto range : spill_ranges) {
     if (range->IsEmpty()) continue;
     // Allocate a new operand referring to the spill slot.
-    int byte_width = range->ByteWidth();
-    int index = data()->frame()->AllocateSpillSlot(byte_width);
-    range->set_assigned_slot(index);
+    auto kind = range->kind();
+    int index = data()->frame()->AllocateSpillSlot(kind == DOUBLE_REGISTERS);
+    auto op_kind = kind == DOUBLE_REGISTERS
+                       ? AllocatedOperand::DOUBLE_STACK_SLOT
+                       : AllocatedOperand::STACK_SLOT;
+    auto op = AllocatedOperand::New(data()->code_zone(), op_kind, index);
+    range->SetOperand(op);
   }
 }
 
@@ -2720,18 +2666,16 @@ void OperandAssigner::AssignSpillSlots() {
 void OperandAssigner::CommitAssignment() {
   for (auto range : data()->live_ranges()) {
     if (range == nullptr || range->IsEmpty()) continue;
-    InstructionOperand spill_operand;
-    if (range->TopLevel()->HasSpillOperand()) {
-      spill_operand = *range->TopLevel()->GetSpillOperand();
-    } else if (range->TopLevel()->HasSpillRange()) {
-      spill_operand = range->TopLevel()->GetSpillRangeOperand();
+    InstructionOperand* spill_operand = nullptr;
+    if (!range->TopLevel()->HasNoSpillType()) {
+      spill_operand = range->TopLevel()->GetSpillOperand();
     }
     auto assigned = range->GetAssignedOperand();
     range->ConvertUsesToOperand(assigned, spill_operand);
     if (range->is_phi()) {
       data()->GetPhiMapValueFor(range->id())->CommitAssignment(assigned);
     }
-    if (!range->IsChild() && !spill_operand.IsInvalid()) {
+    if (!range->IsChild() && spill_operand != nullptr) {
       range->CommitSpillsAtDefinition(data()->code(), spill_operand,
                                       range->has_slot_use());
     }
@@ -2812,21 +2756,12 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
 
       // Check if the live range is spilled and the safe point is after
       // the spill position.
-      if (((range->HasSpillOperand() &&
-            !range->GetSpillOperand()->IsConstant()) ||
-           range->HasSpillRange()) &&
-          safe_point >= range->spill_start_index()) {
+      if (range->HasSpillOperand() &&
+          safe_point >= range->spill_start_index() &&
+          !range->GetSpillOperand()->IsConstant()) {
         TRACE("Pointer for range %d (spilled at %d) at safe point %d\n",
               range->id(), range->spill_start_index(), safe_point);
-        InstructionOperand operand;
-        if (range->HasSpillOperand()) {
-          operand = *range->GetSpillOperand();
-        } else {
-          operand = range->GetSpillRangeOperand();
-        }
-        DCHECK(operand.IsStackSlot());
-        DCHECK_EQ(kRepTagged, AllocatedOperand::cast(operand).machine_type());
-        map->RecordReference(operand);
+        map->RecordReference(*range->GetSpillOperand());
       }
 
       if (!cur->spilled()) {
@@ -2836,7 +2771,6 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
             cur->id(), cur->Start().value(), safe_point);
         auto operand = cur->GetAssignedOperand();
         DCHECK(!operand.IsStackSlot());
-        DCHECK_EQ(kRepTagged, AllocatedOperand::cast(operand).machine_type());
         map->RecordReference(operand);
       }
     }
@@ -2975,24 +2909,6 @@ class LiveRangeFinder {
   DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
 };
 
-
-typedef std::pair<ParallelMove*, InstructionOperand> DelayedInsertionMapKey;
-
-
-struct DelayedInsertionMapCompare {
-  bool operator()(const DelayedInsertionMapKey& a,
-                  const DelayedInsertionMapKey& b) {
-    if (a.first == b.first) {
-      return a.second.Compare(b.second);
-    }
-    return a.first < b.first;
-  }
-};
-
-
-typedef ZoneMap<DelayedInsertionMapKey, InstructionOperand,
-                DelayedInsertionMapCompare> DelayedInsertionMap;
-
 }  // namespace
 
 
@@ -3026,7 +2942,7 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
           continue;
         auto pred_op = result.pred_cover_->GetAssignedOperand();
         auto cur_op = result.cur_cover_->GetAssignedOperand();
-        if (pred_op.Equals(cur_op)) continue;
+        if (pred_op == cur_op) continue;
         ResolveControlFlow(block, cur_op, pred_block, pred_op);
       }
       iterator.Advance();
@@ -3039,7 +2955,7 @@ void LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
                                             const InstructionOperand& cur_op,
                                             const InstructionBlock* pred,
                                             const InstructionOperand& pred_op) {
-  DCHECK(!pred_op.Equals(cur_op));
+  DCHECK(pred_op != cur_op);
   int gap_index;
   Instruction::GapPosition position;
   if (block->PredecessorCount() == 1) {
@@ -3058,7 +2974,8 @@ void LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
 
 
 void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
-  DelayedInsertionMap delayed_insertion_map(local_zone);
+  ZoneMap<std::pair<ParallelMove*, InstructionOperand>, InstructionOperand>
+      delayed_insertion_map(local_zone);
   for (auto first_range : data()->live_ranges()) {
     if (first_range == nullptr || first_range->IsChild()) continue;
     for (auto second_range = first_range->next(); second_range != nullptr;
@@ -3074,7 +2991,7 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
       }
       auto prev_operand = first_range->GetAssignedOperand();
       auto cur_operand = second_range->GetAssignedOperand();
-      if (prev_operand.Equals(cur_operand)) continue;
+      if (prev_operand == cur_operand) continue;
       bool delay_insertion = false;
       Instruction::GapPosition gap_pos;
       int gap_index = pos.ToInstructionIndex();
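
In the ConnectRanges hunk above, the revert drops the named DelayedInsertionMap comparator and declares the map inline, so ordering of the (ParallelMove*, InstructionOperand) keys falls back to std::less, i.e. std::pair's lexicographic comparison. A minimal sketch of that pattern, using a hypothetical Operand stand-in rather than V8's InstructionOperand:

#include <iostream>
#include <map>
#include <utility>

// Hypothetical stand-in for an instruction operand; the real type is richer.
struct Operand {
  int kind;
  int index;
  // std::pair's operator< compares members in order, so the map key
  // (ParallelMove*, Operand) needs only this on the operand side.
  bool operator<(const Operand& other) const {
    return kind != other.kind ? kind < other.kind : index < other.index;
  }
};

struct ParallelMove {};  // Only its address matters for the key.

int main() {
  ParallelMove move_a, move_b;
  // Entries group by the ParallelMove* first, then sort by destination.
  std::map<std::pair<ParallelMove*, Operand>, Operand> delayed_insertion_map;
  delayed_insertion_map[std::make_pair(&move_a, Operand{1, 3})] = Operand{0, 7};
  delayed_insertion_map[std::make_pair(&move_a, Operand{0, 2})] = Operand{0, 9};
  delayed_insertion_map[std::make_pair(&move_b, Operand{0, 0})] = Operand{1, 1};
  std::cout << delayed_insertion_map.size() << " delayed moves\n";  // prints 3
  return 0;
}
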
index 41f00af..50b89ed 100644 (file)
@@ -13,6 +13,7 @@ namespace internal {
 namespace compiler {
 
 enum RegisterKind {
+  UNALLOCATED_REGISTERS,
   GENERAL_REGISTERS,
   DOUBLE_REGISTERS
 };
@@ -271,7 +272,7 @@ class SpillRange;
 // intervals over the instruction ordering.
 class LiveRange final : public ZoneObject {
  public:
-  explicit LiveRange(int id, MachineType machine_type);
+  explicit LiveRange(int id);
 
   UseInterval* first_interval() const { return first_interval_; }
   UsePosition* first_pos() const { return first_pos_; }
@@ -288,8 +289,6 @@ class LiveRange final : public ZoneObject {
   InstructionOperand GetAssignedOperand() const;
   int spill_start_index() const { return spill_start_index_; }
 
-  MachineType machine_type() const { return MachineTypeField::decode(bits_); }
-
   int assigned_register() const { return AssignedRegisterField::decode(bits_); }
   bool HasRegisterAssigned() const {
     return assigned_register() != kUnassignedRegister;
@@ -300,7 +299,10 @@ class LiveRange final : public ZoneObject {
   bool spilled() const { return SpilledField::decode(bits_); }
   void Spill();
 
-  RegisterKind kind() const;
+  RegisterKind kind() const { return RegisterKindField::decode(bits_); }
+  void set_kind(RegisterKind kind) {
+    bits_ = RegisterKindField::update(bits_, kind);
+  }
 
   // Correct only for parent.
   bool is_phi() const { return IsPhiField::decode(bits_); }
@@ -384,14 +386,14 @@ class LiveRange final : public ZoneObject {
     return spill_type() == SpillType::kSpillOperand;
   }
   bool HasSpillRange() const { return spill_type() == SpillType::kSpillRange; }
-  AllocatedOperand GetSpillRangeOperand() const;
 
   void SpillAtDefinition(Zone* zone, int gap_index,
                          InstructionOperand* operand);
   void SetSpillOperand(InstructionOperand* operand);
   void SetSpillRange(SpillRange* spill_range);
+  void CommitSpillOperand(AllocatedOperand* operand);
   void CommitSpillsAtDefinition(InstructionSequence* sequence,
-                                const InstructionOperand& operand,
+                                InstructionOperand* operand,
                                 bool might_be_duplicated);
 
   void SetSpillStartIndex(int start) {
@@ -414,7 +416,7 @@ class LiveRange final : public ZoneObject {
   void Verify() const;
 
   void ConvertUsesToOperand(const InstructionOperand& op,
-                            const InstructionOperand& spill_op);
+                            InstructionOperand* spill_op);
   void SetUseHints(int register_index);
   void UnsetUseHints() { SetUseHints(kUnassignedRegister); }
 
@@ -435,9 +437,9 @@ class LiveRange final : public ZoneObject {
   typedef BitField<bool, 1, 1> HasSlotUseField;
   typedef BitField<bool, 2, 1> IsPhiField;
   typedef BitField<bool, 3, 1> IsNonLoopPhiField;
-  typedef BitField<SpillType, 4, 2> SpillTypeField;
-  typedef BitField<int32_t, 6, 6> AssignedRegisterField;
-  typedef BitField<MachineType, 12, 15> MachineTypeField;
+  typedef BitField<RegisterKind, 4, 2> RegisterKindField;
+  typedef BitField<SpillType, 6, 2> SpillTypeField;
+  typedef BitField<int32_t, 8, 6> AssignedRegisterField;
 
   int id_;
   int spill_start_index_;
@@ -466,23 +468,13 @@ class LiveRange final : public ZoneObject {
 
 class SpillRange final : public ZoneObject {
  public:
-  static const int kUnassignedSlot = -1;
   SpillRange(LiveRange* range, Zone* zone);
 
   UseInterval* interval() const { return use_interval_; }
-  // Currently, only 4 or 8 byte slots are supported.
-  int ByteWidth() const;
+  RegisterKind kind() const { return live_ranges_[0]->kind(); }
   bool IsEmpty() const { return live_ranges_.empty(); }
   bool TryMerge(SpillRange* other);
-
-  void set_assigned_slot(int index) {
-    DCHECK_EQ(kUnassignedSlot, assigned_slot_);
-    assigned_slot_ = index;
-  }
-  int assigned_slot() {
-    DCHECK_NE(kUnassignedSlot, assigned_slot_);
-    return assigned_slot_;
-  }
+  void SetOperand(AllocatedOperand* op);
 
  private:
   LifetimePosition End() const { return end_position_; }
@@ -494,7 +486,6 @@ class SpillRange final : public ZoneObject {
   ZoneVector<LiveRange*> live_ranges_;
   UseInterval* use_interval_;
   LifetimePosition end_position_;
-  int assigned_slot_;
 
   DISALLOW_COPY_AND_ASSIGN(SpillRange);
 };
@@ -558,12 +549,7 @@ class RegisterAllocationData final : public ZoneObject {
   const char* debug_name() const { return debug_name_; }
   const RegisterConfiguration* config() const { return config_; }
 
-  MachineType MachineTypeFor(int virtual_register);
-
   LiveRange* LiveRangeFor(int index);
-  // Creates a new live range.
-  LiveRange* NewLiveRange(int index, MachineType machine_type);
-  LiveRange* NewChildRangeFor(LiveRange* range);
 
   SpillRange* AssignSpillRangeToLiveRange(LiveRange* range);
 
@@ -577,6 +563,9 @@ class RegisterAllocationData final : public ZoneObject {
 
   bool ExistsUseWithoutDefinition();
 
+  // Creates a new live range.
+  LiveRange* NewLiveRange(int index);
+
   void MarkAllocated(RegisterKind kind, int index);
 
   PhiMapValue* InitializePhiMap(const InstructionBlock* block,
@@ -597,7 +586,6 @@ class RegisterAllocationData final : public ZoneObject {
   ZoneVector<SpillRange*> spill_ranges_;
   BitVector* assigned_registers_;
   BitVector* assigned_double_registers_;
-  int virtual_register_count_;
 
   DISALLOW_COPY_AND_ASSIGN(RegisterAllocationData);
 };
@@ -676,6 +664,9 @@ class LiveRangeBuilder final : public ZoneObject {
   void MapPhiHint(InstructionOperand* operand, UsePosition* use_pos);
   void ResolvePhiHint(InstructionOperand* operand, UsePosition* use_pos);
 
+  // Returns the register kind required by the given virtual register.
+  RegisterKind RequiredRegisterKind(int virtual_register) const;
+
   UsePosition* NewUsePosition(LifetimePosition pos, InstructionOperand* operand,
                               void* hint, UsePositionHintType hint_type);
   UsePosition* NewUsePosition(LifetimePosition pos) {
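
The LiveRange hunk above re-packs the per-range flags: RegisterKindField now sits at bits 4-5, SpillTypeField at bits 6-7, and the assigned register at bits 8-13, replacing the 15-bit MachineTypeField. A minimal sketch of the BitField<Type, shift, size> encode/decode/update pattern such a layout relies on (not V8's actual BitField implementation):

#include <cassert>
#include <cstdint>

// Minimal BitField-style helper: 'size' bits starting at bit 'shift'.
template <typename T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << shift) & kMask;
  }
  static T decode(uint32_t bits) {
    return static_cast<T>((bits & kMask) >> shift);
  }
  static uint32_t update(uint32_t bits, T value) {
    return (bits & ~kMask) | encode(value);
  }
};

enum RegisterKind { UNALLOCATED_REGISTERS, GENERAL_REGISTERS, DOUBLE_REGISTERS };

// Mirrors the layout in the hunk above: kind in bits 4-5, register in 8-13.
typedef BitField<RegisterKind, 4, 2> RegisterKindField;
typedef BitField<int32_t, 8, 6> AssignedRegisterField;

int main() {
  uint32_t bits = 0;
  bits = RegisterKindField::update(bits, DOUBLE_REGISTERS);
  bits = AssignedRegisterField::update(bits, 13);
  assert(RegisterKindField::decode(bits) == DOUBLE_REGISTERS);
  assert(AssignedRegisterField::decode(bits) == 13);
  return 0;
}
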
index a081ddb..09a7154 100644 (file)
@@ -89,8 +89,7 @@ class InterpreterState {
     if (key.is_constant) {
       return ConstantOperand(key.index);
     }
-    return AllocatedOperand(
-        key.kind, InstructionSequence::DefaultRepresentation(), key.index);
+    return AllocatedOperand(key.kind, key.index);
   }
 
   friend std::ostream& operator<<(std::ostream& os,
@@ -149,7 +148,7 @@ class ParallelMoveCreator : public HandleAndZoneScope {
 
   ParallelMove* Create(int size) {
     ParallelMove* parallel_move = new (main_zone()) ParallelMove(main_zone());
-    std::set<InstructionOperand, CompareOperandModuloType> seen;
+    std::set<InstructionOperand> seen;
     for (int i = 0; i < size; ++i) {
       MoveOperands mo(CreateRandomOperand(true), CreateRandomOperand(false));
       if (!mo.IsRedundant() && seen.find(mo.destination()) == seen.end()) {
@@ -161,38 +160,18 @@ class ParallelMoveCreator : public HandleAndZoneScope {
   }
 
  private:
-  MachineType RandomType() {
-    int index = rng_->NextInt(3);
-    switch (index) {
-      case 0:
-        return kRepWord32;
-      case 1:
-        return kRepWord64;
-      case 2:
-        return kRepTagged;
-    }
-    UNREACHABLE();
-    return kMachNone;
-  }
-
-  MachineType RandomDoubleType() {
-    int index = rng_->NextInt(2);
-    if (index == 0) return kRepFloat64;
-    return kRepFloat32;
-  }
-
   InstructionOperand CreateRandomOperand(bool is_source) {
     int index = rng_->NextInt(6);
     // destination can't be Constant.
     switch (rng_->NextInt(is_source ? 5 : 4)) {
       case 0:
-        return StackSlotOperand(RandomType(), index);
+        return StackSlotOperand(index);
       case 1:
-        return DoubleStackSlotOperand(RandomDoubleType(), index);
+        return DoubleStackSlotOperand(index);
       case 2:
-        return RegisterOperand(RandomType(), index);
+        return RegisterOperand(index);
       case 3:
-        return DoubleRegisterOperand(RandomDoubleType(), index);
+        return DoubleRegisterOperand(index);
       case 4:
         return ConstantOperand(index);
     }
index d3c4577..22ee613 100644 (file)
@@ -263,8 +263,8 @@ TEST(InstructionAddGapMove) {
     CHECK(move);
     CHECK_EQ(1u, move->size());
     MoveOperands* cur = move->at(0);
-    CHECK(op1.Equals(cur->source()));
-    CHECK(op2.Equals(cur->destination()));
+    CHECK(op1 == cur->source());
+    CHECK(op2 == cur->destination());
   }
 }
 
@@ -308,15 +308,15 @@ TEST(InstructionOperands) {
         CHECK(k == m->TempCount());
 
         for (size_t z = 0; z < i; z++) {
-          CHECK(outputs[z].Equals(*m->OutputAt(z)));
+          CHECK(outputs[z] == *m->OutputAt(z));
         }
 
         for (size_t z = 0; z < j; z++) {
-          CHECK(inputs[z].Equals(*m->InputAt(z)));
+          CHECK(inputs[z] == *m->InputAt(z));
         }
 
         for (size_t z = 0; z < k; z++) {
-          CHECK(temps[z].Equals(*m->TempAt(z)));
+          CHECK(temps[z] == *m->TempAt(z));
         }
       }
     }
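
The test hunks above replace op.Equals(other) calls with the comparison operators, so operands are checked like ordinary values. A minimal sketch of giving a small operand-like struct the operators those CHECKs need (hypothetical, not V8's InstructionOperand):

#include <cassert>

// Hypothetical operand-like value type with value semantics.
struct Op {
  int kind;
  int index;
};

// Equality compares the full state, so it can stand in for an
// Equals(other) member call in test assertions.
inline bool operator==(const Op& a, const Op& b) {
  return a.kind == b.kind && a.index == b.index;
}
inline bool operator!=(const Op& a, const Op& b) { return !(a == b); }

int main() {
  Op a = {1, 13};
  Op b = {1, 13};
  assert(a == b);
  assert(!(a != b));
  return 0;
}
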
index e0a4a2f..c1b9f95 100644 (file)
@@ -59,14 +59,13 @@ class TestCode : public HandleAndZoneScope {
     Start();
     sequence_.AddInstruction(Instruction::New(main_zone(), kArchNop));
     int index = static_cast<int>(sequence_.instructions().size()) - 1;
-    AddGapMove(index, RegisterOperand(kRepWord32, 13),
-               RegisterOperand(kRepWord32, 13));
+    AddGapMove(index, RegisterOperand(13), RegisterOperand(13));
   }
   void NonRedundantMoves() {
     Start();
     sequence_.AddInstruction(Instruction::New(main_zone(), kArchNop));
     int index = static_cast<int>(sequence_.instructions().size()) - 1;
-    AddGapMove(index, ConstantOperand(11), RegisterOperand(kRepWord32, 11));
+    AddGapMove(index, ConstantOperand(11), RegisterOperand(11));
   }
   void Other() {
     Start();
index 645b0f6..01aa80a 100644 (file)
@@ -96,12 +96,12 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
   }
   for (auto i : s.virtual_registers_) {
     int const virtual_register = i.second;
-    if (sequence.IsFloat(virtual_register)) {
+    if (sequence.IsDouble(virtual_register)) {
       EXPECT_FALSE(sequence.IsReference(virtual_register));
       s.doubles_.insert(virtual_register);
     }
     if (sequence.IsReference(virtual_register)) {
-      EXPECT_FALSE(sequence.IsFloat(virtual_register));
+      EXPECT_FALSE(sequence.IsDouble(virtual_register));
       s.references_.insert(virtual_register);
     }
   }
index 975706a..c82f427 100644 (file)
@@ -33,7 +33,7 @@ class MoveOptimizerTest : public InstructionSequenceTest {
     auto to = ConvertMoveArg(to_op);
     for (auto move : *moves) {
       if (move->IsRedundant()) continue;
-      if (move->source().Equals(from) && move->destination().Equals(to)) {
+      if (move->source() == from && move->destination() == to) {
         return true;
       }
     }
@@ -67,10 +67,10 @@ class MoveOptimizerTest : public InstructionSequenceTest {
       case kConstant:
         return ConstantOperand(op.value_);
       case kFixedSlot:
-        return StackSlotOperand(kRepWord32, op.value_);
+        return StackSlotOperand(op.value_);
       case kFixedRegister:
         CHECK(0 <= op.value_ && op.value_ < num_general_registers());
-        return RegisterOperand(kRepWord32, op.value_);
+        return RegisterOperand(op.value_);
       default:
         break;
     }